diff --git a/.gitattributes b/.gitattributes index 264e058..323221b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -21,3 +21,5 @@ *.dae filter=lfs diff=lfs merge=lfs -text *.so filter=lfs diff=lfs merge=lfs -text *.so.* filter=lfs diff=lfs merge=lfs -text +# Docs static assets must NOT use LFS — GitHub Pages can't serve LFS pointers +docs/source/_static/** filter= diff= merge= diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000..4ecd80b --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,74 @@ +name: Build and Deploy Documentation + +on: + push: + branches: + - main + - gear-sonic + paths: + - "docs/**" + - ".github/workflows/docs.yml" + - ".gitattributes" + workflow_dispatch: + +# Allow only one concurrent deployment; cancel in-flight runs +concurrency: + group: "pages" + cancel-in-progress: true + +jobs: + build: + name: Build Sphinx Docs + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + lfs: false + + - name: Restore docs static assets (bypass git-lfs smudge) + run: | + # git-lfs on the runner rewrites files tracked by *.png/*.gif + # even when our .gitattributes override removes filter=lfs. + # Use git cat-file to write real binary content directly from + # the object store, bypassing all smudge filters. 
+ git ls-tree -r HEAD -- docs/source/_static \ + | awk '{print $3, $4}' \ + | while IFS=" " read -r hash path; do + git cat-file blob "$hash" > "$path" + done + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + cache: "pip" + cache-dependency-path: "docs/requirements.txt" + + - name: Install documentation dependencies + run: pip install -r docs/requirements.txt + + - name: Build HTML documentation + run: sphinx-build -b html docs/source docs/build/html + + - name: Upload Pages artifact + if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/gear-sonic' + uses: actions/upload-pages-artifact@v3 + with: + path: docs/build/html + + deploy: + name: Deploy to GitHub Pages + needs: build + if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/gear-sonic' + runs-on: ubuntu-latest + permissions: + pages: write + id-token: write + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.gitignore b/.gitignore index 920382b..b78f56d 100644 --- a/.gitignore +++ b/.gitignore @@ -67,7 +67,7 @@ instance/ .scrapy # Sphinx documentation -docs/_build/ + # PyBuilder .pybuilder/ @@ -166,10 +166,29 @@ outputs/ .DS_Store +# Hugging Face upload/maintenance scripts (internal use only) +huggingface/ + +# Model checkpoints (download via download_from_hf.py) +*.onnx +!decoupled_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Balance.onnx +!decoupled_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Walk.onnx + # Mujoco MUJOCO_LOG.TXT # IsaacDeploy external_dependencies/isaac_teleop_app/isaac-deploy +external_dependencies/doc/ + +# XRoboToolkit pybind (cloned locally on aarch64 by install_pico.sh) +external_dependencies/XRoboToolkit-PC-Service-Pybind/ + +# UV +uv.lock +# XRoboToolkit-PC-Service-Pybind +xrobotoolkit_sdk.cpython-*-*-*.so +teleop_vids/ +*.code-workspace \ No 
newline at end of file diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000..6cab5e7 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,93 @@ +cff-version: 1.2.0 +message: "If you use this software, please cite it as below." +title: "GR00T Whole-Body Control" +authors: + - family-names: "Luo" + given-names: "Zhengyi" + - family-names: "Yuan" + given-names: "Ye" + - family-names: "Wang" + given-names: "Tingwu" + - family-names: "Li" + given-names: "Chenran" + - family-names: "Chen" + given-names: "Sirui" + - family-names: "Castañeda" + given-names: "Fernando" + - family-names: "Cao" + given-names: "Zi-Ang" + - family-names: "Li" + given-names: "Jiefeng" + - family-names: "Zhu" + given-names: "Yuke" +url: "https://github.com/NVlabs/GR00T-WholeBodyControl" +repository-code: "https://github.com/NVlabs/GR00T-WholeBodyControl" +type: software +keywords: + - humanoid-robotics + - reinforcement-learning + - whole-body-control + - motion-tracking + - teleoperation + - robotics + - pytorch +license: Apache-2.0 +preferred-citation: + type: article + title: "SONIC: Supersizing Motion Tracking for Natural Humanoid Whole-Body Control" + authors: + - family-names: "Luo" + given-names: "Zhengyi" + - family-names: "Yuan" + given-names: "Ye" + - family-names: "Wang" + given-names: "Tingwu" + - family-names: "Li" + given-names: "Chenran" + - family-names: "Chen" + given-names: "Sirui" + - family-names: "Castañeda" + given-names: "Fernando" + - family-names: "Cao" + given-names: "Zi-Ang" + - family-names: "Li" + given-names: "Jiefeng" + - family-names: "Minor" + given-names: "David" + - family-names: "Ben" + given-names: "Qingwei" + - family-names: "Da" + given-names: "Xingye" + - family-names: "Ding" + given-names: "Runyu" + - family-names: "Hogg" + given-names: "Cyrus" + - family-names: "Song" + given-names: "Lina" + - family-names: "Lim" + given-names: "Edy" + - family-names: "Jeong" + given-names: "Eugene" + - family-names: "He" + given-names: "Tairan" + - 
family-names: "Xue" + given-names: "Haoru" + - family-names: "Xiao" + given-names: "Wenli" + - family-names: "Wang" + given-names: "Zi" + - family-names: "Yuen" + given-names: "Simon" + - family-names: "Kautz" + given-names: "Jan" + - family-names: "Chang" + given-names: "Yan" + - family-names: "Iqbal" + given-names: "Umar" + - family-names: "Fan" + given-names: "Linxi" + - family-names: "Zhu" + given-names: "Yuke" + journal: "arXiv preprint" + year: 2025 + url: "https://arxiv.org/abs/2511.07820" diff --git a/LICENSE b/LICENSE index 7f9b118..0cd7b5a 100644 --- a/LICENSE +++ b/LICENSE @@ -1,38 +1,186 @@ -NVIDIA License +================================================================================ +DUAL LICENSE NOTICE +================================================================================ -1. Definitions +This repository is dual-licensed. Different components are under different terms: -“Licensor” means any person or entity that distributes its Work. -“Work” means (a) the original work of authorship made available under this license, which may include software, documentation, or other files, and (b) any additions to or derivative works thereof that are made available under this license. -The terms “reproduce,” “reproduction,” “derivative works,” and “distribution” have the meaning as provided under U.S. copyright law; provided, however, that for the purposes of this license, derivative works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work. -Works are “made available” under this license by including in or with the Work either (a) a copyright notice referencing the applicability of this license to the Work, or (b) a copy of this license. +1. SOURCE CODE - Apache License 2.0 + All source code, scripts, and software components -2. License Grant +2. MODEL WEIGHTS - NVIDIA Open Model License + All trained model checkpoints and weights -2.1 Copyright Grant. 
Subject to the terms and conditions of this license, each Licensor grants to you a perpetual, worldwide, non-exclusive, royalty-free, copyright license to use, reproduce, prepare derivative works of, publicly display, publicly perform, sublicense and distribute its Work and any resulting derivative works in any form. +See below for the full text of each license. -3. Limitations +================================================================================ +PART 1: SOURCE CODE LICENSE (Apache License 2.0) +================================================================================ -3.1 Redistribution. You may reproduce or distribute the Work only if (a) you do so under this license, (b) you include a complete copy of this license with your distribution, and (c) you retain without modification any copyright, patent, trademark, or attribution notices that are present in the Work. +Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -3.2 Derivative Works. You may specify that additional or different terms apply to the use, reproduction, and distribution of your derivative works of the Work (“Your Terms”) only if (a) Your Terms provide that the use limitation in Section 3.3 applies to your derivative works; (b) you comply with Other Licenses, and (c) you identify the specific derivative works that are subject to Your Terms and Other Licenses, as applicable. Notwithstanding Your Terms, this license (including the redistribution requirements in Section 3.1) will continue to apply to the Work itself. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -3.3 Use Limitation. The Work and any derivative works thereof only may be used or intended for use non-commercially. 
As used herein, “non-commercially” means for non-commercial research purposes only, and excludes any military, surveillance, service of nuclear technology or biometric processing purposes. + http://www.apache.org/licenses/LICENSE-2.0 -3.4 Patent Claims. If you bring or threaten to bring a patent claim against any Licensor (including any claim, cross-claim or counterclaim in a lawsuit) to enforce any patents that you allege are infringed by any Work, then your rights under this license from such Licensor (including the grant in Section 2.1) will terminate immediately. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. -3.5 Trademarks. This license does not grant any rights to use any Licensor’s or its affiliates’ names, logos, or trademarks, except as necessary to reproduce the notices described in this license. -3.6 Termination. If you violate any term of this license, then your rights under this license (including the grant in Section 2.1) will terminate immediately. +================================================================================ +PART 2: MODEL WEIGHTS LICENSE (NVIDIA Open Model License) +================================================================================ -3.7 Components Under Other Licenses. The Work may include or be distributed with components provided with separate legal notices or terms that accompany the components, such as open source software licenses and other license terms, including but not limited to the Meta OPT-IML 175B License Agreement (“Other Licenses”). 
The components are subject to the applicable Other Licenses, including any proprietary notices, disclaimers, requirements and extended use rights; except that this Agreement will prevail regarding the use of third-party software, unless a third-party software license requires it license terms to prevail. +NVIDIA OPEN MODEL LICENSE AGREEMENT -4. Disclaimer of Warranty. +Last Modified: October 24, 2025 -THE WORK IS PROVIDED “AS IS” WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER THIS LICENSE. +NVIDIA Corporation and its affiliates ("NVIDIA") grants permission to use machine +learning models under specific conditions. Key permissions include creating +derivative models and distributing them, with NVIDIA retaining no ownership claims +over outputs generated by users. -5. Limitation of Liability. +SECTION 1: DEFINITIONS -EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +1.1 "Derivative Model" means any modification of, or works based on or derived +from, the Model, excluding outputs. +1.2 "Legal Entity" means the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+ +1.3 "Model" means the machine learning model, software, and any checkpoints, +weights, algorithms, parameters, configuration files, and documentation that NVIDIA +makes available under this Agreement. + +1.4 "NVIDIA Cosmos Model" means a multimodal Model that is covered by this Agreement. + +1.5 "Special-Purpose Model" means a Model that is limited to narrow, +purpose-specific tasks. + +1.6 "You" or "Your" means an individual or Legal Entity exercising permissions +granted by this Agreement. + +SECTION 2: CONDITIONS FOR USE, LICENSE GRANT, AI ETHICS AND IP OWNERSHIP + +2.1 Conditions for Use. You must comply with all terms and conditions of this +Agreement. If You initiate copyright or patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the Model +constitutes direct or contributory infringement, then Your licenses under this +Agreement shall terminate. If You circumvent any safety guardrails or safety +measures built in to the Model without providing comparable alternatives, Your +rights under this Agreement shall terminate. NVIDIA may update this Agreement at +any time to comply with applicable law; Your continued use constitutes Your +acceptance of the updated terms. + +2.2 License Grant. Subject to the terms and conditions of this Agreement, NVIDIA +hereby grants You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +revocable license to publicly perform, publicly display, reproduce, use, create +derivative works of, make, have made, sell, offer for sale, distribute and import +the Model. + +2.3 AI Ethics. Your use of the Model must be in accordance with NVIDIA's +Trustworthy AI terms, which can be found at +https://www.nvidia.com/en-us/agreements/trustworthy-ai/terms/. + +2.4 IP Ownership. NVIDIA owns the original Model and NVIDIA's Derivative Models. +You own Your Derivative Models. NVIDIA makes no claim of ownership to outputs. You +are responsible for outputs and their subsequent uses. 
+ +SECTION 3: REDISTRIBUTION + +You may reproduce and distribute copies of the Model or Derivative Models thereof, +with or without modifications, provided that You meet the following conditions: + +a. You must include a copy of this Agreement. + +b. You must include the following attribution notice, which can appear in the same +location as other third-party notices or license information: "Licensed by NVIDIA +Corporation under the NVIDIA Open Model License." + +c. If You are distributing a NVIDIA Cosmos Model, You must also include the phrase +"Built on NVIDIA Cosmos" on the applicable website, in the user interface, in a +blog, in an "about" page, or in product documentation. + +d. You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications or for any Derivative Models as a whole, +provided Your use, reproduction, and distribution otherwise complies with this +Agreement. + +SECTION 4: SEPARATE COMPONENTS + +The Model may contain components that are subject to separate legal notices or +governed by separate licenses (including Open Source Software Licenses), as may be +described in any files made available with the Model. Your use of those separate +components is subject to the applicable license. This Agreement shall control over +the separate licenses for third-party Open Source Software to the extent that the +separate license imposes additional restrictions. "Open Source Software License" +means any software license approved by the Open Source Initiative, Free Software +Foundation, or similar recognized organization, or a license identified by SPDX. 
+ +SECTION 5: TRADEMARKS + +This Agreement does not grant permission to use the trade names, trademarks, +service marks, or product names of NVIDIA, except as required for reasonable and +customary use in describing the origin of the Model and reproducing the content of +the notice. + +SECTION 6: DISCLAIMER OF WARRANTY + +NVIDIA provides the Model on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +ANY KIND, either express or implied, including, without limitation, any warranties +or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for reviewing the documentation +accompanying the Model and determining the appropriateness of using the Model, and +You understand that Special-Purpose Models are limited to narrow, purpose-specific +tasks and must not be deployed for uses that are beyond such tasks. + +SECTION 7: LIMITATION OF LIABILITY + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate and +grossly negligent acts) or agreed to in writing, will NVIDIA be liable to You for +damages, including any direct, indirect, special, incidental, or consequential +damages of any character arising as a result of this Agreement or out of the use +or inability to use the Model or Derivative Models or outputs (including but not +limited to damages for loss of goodwill, work stoppage, computer failure or +malfunction, or any and all other commercial damages or losses), even if NVIDIA has +been advised of the possibility of such damages. 
+ +SECTION 8: INDEMNITY + +You will defend, indemnify and hold harmless NVIDIA and its affiliates, and their +respective employees, contractors, directors, officers and agents, from and against +any and all claims, damages, obligations, losses, liabilities, costs or debt, and +expenses (including but not limited to attorney's fees) arising from Your use or +distribution of the Model or Derivative Models or outputs. + +SECTION 9: FEEDBACK + +NVIDIA may use feedback You provide without restriction and without any +compensation to You. + +SECTION 10: GOVERNING LAW + +This Agreement will be governed in all respects by the laws of the United States +and of the State of Delaware, without regard to conflict of laws provisions. The +federal and state courts residing in Santa Clara County, California shall have +exclusive jurisdiction over any dispute arising out of this Agreement, and You +hereby consent to the personal jurisdiction of such courts. However, NVIDIA shall +have the right to seek injunctive relief in any court of competent jurisdiction. + +SECTION 11: TRADE AND COMPLIANCE + +You shall comply with all applicable import, export, trade, and economic sanctions +laws, including without limitation the Export Administration Regulations and +economic sanctions laws implemented by the Office of Foreign Assets Control, that +restrict or govern the destination, end-user and end-use of NVIDIA products, +technology, software, and services. + +--- + +Version Release Date: October 24, 2025 diff --git a/README.md b/README.md index b593832..e692bdb 100644 --- a/README.md +++ b/README.md @@ -1,140 +1,246 @@ -# gr00t_wbc +
-Software stack for loco-manipulation experiments across multiple humanoid platforms, with primary support for the Unitree G1. This repository provides whole-body control policies, a teleoperation stack, and a data exporter. + GEAR SONIC Header + + + + +
+ +
+ +[![License](https://img.shields.io/badge/License-Apache%202.0-76B900.svg)](LICENSE) +[![IsaacLab](https://img.shields.io/badge/IsaacLab-2.3.0-orange.svg)](https://github.com/isaac-sim/IsaacLab/releases/tag/v2.3.0) +[![Documentation](https://img.shields.io/badge/docs-GitHub%20Pages-76B900.svg)](https://nvlabs.github.io/GR00T-WholeBodyControl/) + +
--- -## System Installation -### Prerequisites -- Ubuntu 22.04 -- NVIDIA GPU with a recent driver -- Docker and NVIDIA Container Toolkit (required for GPU access inside the container) -### Repository Setup -Install Git and Git LFS: -```bash -sudo apt update -sudo apt install git git-lfs -git lfs install -``` -Clone the repository: +# GR00T-WholeBodyControl + +This is the codebase for the **GR00T Whole-Body Control (WBC)** projects. It hosts model checkpoints and scripts for training, evaluating, and deploying advanced whole-body controllers for humanoid robots. We currently support: + +- **Decoupled WBC**: the decoupled controller (RL for lower body, and IK for upper body) used in NVIDIA GR00T [N1.5](https://research.nvidia.com/labs/gear/gr00t-n1_5/) and [N1.6](https://research.nvidia.com/labs/gear/gr00t-n1_6/) models; +- **GEAR-SONIC Series**: our latest iteration of generalist humanoid whole-body controllers (see our [whitepaper](https://nvlabs.github.io/GEAR-SONIC/)). + +## Table of Contents + +- [GEAR-SONIC](#gear-sonic) +- [VR Whole-Body Teleoperation](#vr-whole-body-teleoperation) +- [Kinematic Planner](#kinematic-planner) +- [TODOs](#todos) +- [What's Included](#whats-included) + - [Setup](#setup) +- [Documentation](#documentation) +- [Citation](#citation) +- [License](#license) +- [Support](#support) +- [Decoupled WBC](#decoupled-wbc) + + +## GEAR-SONIC + +

+ Website | + Model | + Paper +

+ +
+ + +
+ +SONIC is a humanoid behavior foundation model that gives robots a core set of motor skills learned from large-scale human motion data. Rather than building separate controllers for predefined motions, SONIC uses motion tracking as a scalable training task, enabling a single unified policy to produce natural, whole-body movement and support a wide range of behaviors — from walking and crawling to teleoperation and multi-modal control. It is designed to generalize beyond the motions it has seen during training and to serve as a foundation for higher-level planning and interaction. + +In this repo, we will release SONIC's training code, deployment framework, model checkpoints, and teleoperation stack for data collection. + + +## VR Whole-Body Teleoperation + +SONIC supports real-time whole-body teleoperation via PICO VR headset, enabling natural human-to-robot motion transfer for data collection and interactive control. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
WalkingRunning
Sideways MovementKneeling
Getting UpJumping
Bimanual ManipulationObject Hand-off
+
+ +## Kinematic Planner + +SONIC includes a kinematic planner for real-time locomotion generation — choose a movement style, steer with keyboard/gamepad, and adjust speed and height on the fly. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
In-the-Wild Navigation
RunHappy
StealthInjured
KneelingHand Crawling
Elbow CrawlingBoxing
+
+ +## TODOs + +- [x] Release pretrained SONIC policy checkpoints +- [x] Open source C++ inference stack +- [x] Setup documentation +- [x] Open source teleoperation stack and demonstration scripts +- [ ] Release training scripts and recipes for motion imitation and fine-tuning +- [ ] Open source large-scale data collection workflows and fine-tuning VLA scripts. +- [ ] Publish additional preprocessed large-scale human motion datasets + + + +## What's Included + +This release includes: + +- **`gear_sonic_deploy`**: C++ inference stack for deploying SONIC policies on real hardware +- **`gear_sonic`**: Teleoperation stack for collecting demonstration data (no training code, YET.) + +### Setup + +Clone the repository with Git LFS: ```bash -mkdir -p ~/Projects -cd ~/Projects -git clone https://github.com/NVlabs/gr00t_wbc.git -cd gr00t_wbc +git clone https://github.com/NVlabs/GR00T-WholeBodyControl.git +cd GR00T-WholeBodyControl +git lfs pull ``` -### Docker Environment -We provide a Docker image with all dependencies pre-installed. +## Documentation -Install a fresh image and start a container: -```bash -./docker/run_docker.sh --install --root -``` -This pulls the latest `gr00t_wbc` image from `docker.io/nvgear`. +📚 **[Full Documentation](https://nvlabs.github.io/GR00T-WholeBodyControl/)** -Start or re-enter a container: -```bash -./docker/run_docker.sh --root -``` +### Getting Started +- [Installation Guide](https://nvlabs.github.io/GR00T-WholeBodyControl/getting_started/installation_deploy.html) +- [Quick Start](https://nvlabs.github.io/GR00T-WholeBodyControl/getting_started/quickstart.html) +- [VR Teleoperation Setup](https://nvlabs.github.io/GR00T-WholeBodyControl/getting_started/vr_teleop_setup.html) -Use `--root` to run as the `root` user. 
To run as a normal user, build the image locally: -```bash -./docker/run_docker.sh --build -``` ---- +### Tutorials +- [Keyboard Control](https://nvlabs.github.io/GR00T-WholeBodyControl/tutorials/keyboard.html) +- [Gamepad Control](https://nvlabs.github.io/GR00T-WholeBodyControl/tutorials/gamepad.html) +- [ZMQ Communication](https://nvlabs.github.io/GR00T-WholeBodyControl/tutorials/zmq.html) +- [ZMQ Manager / PICO VR](https://nvlabs.github.io/GR00T-WholeBodyControl/tutorials/vr_wholebody_teleop.html) -## Running the Control Stack - -Once inside the container, the control policies can be launched directly. - -- Simulation: - ```bash - python gr00t_wbc/control/main/teleop/run_g1_control_loop.py - ``` -- Real robot: Ensure the host machine network is configured per the [G1 SDK Development Guide](https://support.unitree.com/home/en/G1_developer) and set a static IP at `192.168.123.222`, subnet mask `255.255.255.0`: - ```bash - python gr00t_wbc/control/main/teleop/run_g1_control_loop.py --interface real - ``` - -Keyboard shortcuts (terminal window): -- `]`: Activate policy -- `o`: Deactivate policy -- `9`: Release / Hold the robot -- `w` / `s`: Move forward / backward -- `a` / `d`: Strafe left / right -- `q` / `e`: Rotate left / right -- `z`: Zero navigation commands -- `1` / `2`: Raise / lower the base height -- `backspace` (viewer): Reset the robot in the visualizer +### Best Practices +- [Teleoperation](https://nvlabs.github.io/GR00T-WholeBodyControl/user_guide/teleoperation.html) ---- -## Running the Teleoperation Stack -The teleoperation policy primarily uses Pico controllers for coordinated hand and body control. It also supports other teleoperation devices, including LeapMotion and HTC Vive with Nintendo Switch Joy-Con controllers. 
-Keep `run_g1_control_loop.py` running, and in another terminal run: -```bash -python gr00t_wbc/control/main/teleop/run_teleop_policy_loop.py --hand_control_device=pico --body_control_device=pico + +--- + +## Citation + +If you use GEAR-SONIC in your research, please cite: + +```bibtex +@article{luo2025sonic, + title={SONIC: Supersizing Motion Tracking for Natural Humanoid Whole-Body Control}, + author={Luo, Zhengyi and Yuan, Ye and Wang, Tingwu and Li, Chenran and Chen, Sirui and Casta\~neda, Fernando and Cao, Zi-Ang and Li, Jiefeng and Minor, David and Ben, Qingwei and Da, Xingye and Ding, Runyu and Hogg, Cyrus and Song, Lina and Lim, Edy and Jeong, Eugene and He, Tairan and Xue, Haoru and Xiao, Wenli and Wang, Zi and Yuen, Simon and Kautz, Jan and Chang, Yan and Iqbal, Umar and Fan, Linxi and Zhu, Yuke}, + journal={arXiv preprint arXiv:2511.07820}, + year={2025} +} ``` -### Pico Setup and Controls -Configure the teleop app on your Pico headset by following the [XR Robotics guidelines](https://github.com/XR-Robotics). +--- -The necessary PC software is pre-installed in the Docker container. Only the [XRoboToolkit-PC-Service](https://github.com/XR-Robotics/XRoboToolkit-PC-Service) component is needed. +## License -Prerequisites: Connect the Pico to the same network as the host computer. +This project uses dual licensing: -Controller bindings: -- `menu + left trigger`: Toggle lower-body policy -- `menu + right trigger`: Toggle upper-body policy -- `Left stick`: X/Y translation -- `Right stick`: Yaw rotation -- `L/R triggers`: Control hand grippers +- **Source Code**: Licensed under Apache License 2.0 - applies to all code, scripts, and software components in this repository +- **Model Weights**: Licensed under NVIDIA Open Model License - applies to all trained model checkpoints and weights -Pico unit test: -```bash -python gr00t_wbc/control/teleop/streamers/pico_streamer.py -``` +See [LICENSE](LICENSE) for the complete dual-license text. 
+ +Please review both licenses before using this project. The NVIDIA Open Model License permits commercial use with attribution and requires compliance with NVIDIA's Trustworthy AI terms. + +All required legal documents, including the Apache 2.0 license, 3rd-party attributions, and DCO language, are consolidated in the /legal folder of this repository. --- -## Running the Data Collection Stack +## Support -Run the full stack (control loop, teleop policy, and camera forwarder) via the deployment helper: -```bash -python scripts/deploy_g1.py \ - --interface sim \ - --camera_host localhost \ - --sim_in_single_process \ - --simulator robocasa \ - --image-publish \ - --enable-offscreen \ - --env_name PnPBottle \ - --hand_control_device=pico \ - --body_control_device=pico -``` +For questions and issues, please contact the GEAR WBC team at [gear-wbc@nvidia.com](mailto:gear-wbc@nvidia.com) to provide feedback! -The `tmux` session `g1_deployment` is created with panes for: -- `control_data_teleop`: Main control loop, data collection, and teleoperation policy -- `camera`: Camera forwarder -- `camera_viewer`: Optional live camera feed +## Decoupled WBC -Operations in the `controller` window (`control_data_teleop` pane, left): -- `]`: Activate policy -- `o`: Deactivate policy -- `k`: Reset the simulation and policies -- `` ` ``: Terminate the tmux session -- `ctrl + d`: Exit the shell in the pane +For the Decoupled WBC used in GR00T N1.5 and N1.6 models, please refer to the [Decoupled WBC documentation](docs/source/references/decoupled_wbc.md). 
-Operations in the `data exporter` window (`control_data_teleop` pane, right top): -- Enter the task prompt -Operations on Pico controllers: -- `A`: Start/Stop recording -- `B`: Discard trajectory +## Acknowledgments +We would like to acknowledge the following projects from which parts of the code in this repo are derived from: +- [Beyond Mimic](https://github.com/HybridRobotics/whole_body_tracking) +- [Isaac Lab](https://github.com/isaac-sim/IsaacLab) diff --git a/gr00t_wbc/__init__.py b/decoupled_wbc/__init__.py similarity index 100% rename from gr00t_wbc/__init__.py rename to decoupled_wbc/__init__.py diff --git a/gr00t_wbc/control/__init__.py b/decoupled_wbc/control/__init__.py similarity index 100% rename from gr00t_wbc/control/__init__.py rename to decoupled_wbc/control/__init__.py diff --git a/gr00t_wbc/control/base/__init__.py b/decoupled_wbc/control/base/__init__.py similarity index 100% rename from gr00t_wbc/control/base/__init__.py rename to decoupled_wbc/control/base/__init__.py diff --git a/gr00t_wbc/control/base/env.py b/decoupled_wbc/control/base/env.py similarity index 100% rename from gr00t_wbc/control/base/env.py rename to decoupled_wbc/control/base/env.py diff --git a/decoupled_wbc/control/base/humanoid_env.py b/decoupled_wbc/control/base/humanoid_env.py new file mode 100644 index 0000000..5951597 --- /dev/null +++ b/decoupled_wbc/control/base/humanoid_env.py @@ -0,0 +1,60 @@ +from abc import abstractmethod + +from decoupled_wbc.control.base.env import Env +from decoupled_wbc.control.base.sensor import Sensor +from decoupled_wbc.control.robot_model.robot_model import RobotModel + + +class Hands: + """Container class for left and right hand environments. + + Attributes: + left: Environment for the left hand + right: Environment for the right hand + """ + + left: Env + right: Env + + +class HumanoidEnv(Env): + """Base class for humanoid robot environments. + + This class provides the interface for accessing the robot's body, hands, and sensors. 
+ """ + + def body(self) -> Env: + """Get the robot's body environment. + + Returns: + Env: The body environment + """ + pass + + def hands(self) -> Hands: + """Get the robot's hands. + + Returns: + Hands: Container with left and right hand environments + """ + pass + + def sensors(self) -> dict[str, Sensor]: + """Get the sensors of this environment + + Returns: + dict: A dictionary of sensors + """ + pass + + @abstractmethod + def robot_model(self) -> RobotModel: + """Get the robot model of this environment + This robot model is used to dispatch whole body actions to body + and hand actuators and to reconstruct proprioceptive + observations from body and hands. + + Returns: + RobotModel: The robot model + """ + pass diff --git a/gr00t_wbc/control/base/policy.py b/decoupled_wbc/control/base/policy.py similarity index 100% rename from gr00t_wbc/control/base/policy.py rename to decoupled_wbc/control/base/policy.py diff --git a/gr00t_wbc/control/base/sensor.py b/decoupled_wbc/control/base/sensor.py similarity index 100% rename from gr00t_wbc/control/base/sensor.py rename to decoupled_wbc/control/base/sensor.py diff --git a/gr00t_wbc/control/envs/__init__.py b/decoupled_wbc/control/envs/__init__.py similarity index 100% rename from gr00t_wbc/control/envs/__init__.py rename to decoupled_wbc/control/envs/__init__.py diff --git a/gr00t_wbc/control/envs/g1/__init__.py b/decoupled_wbc/control/envs/g1/__init__.py similarity index 100% rename from gr00t_wbc/control/envs/g1/__init__.py rename to decoupled_wbc/control/envs/g1/__init__.py diff --git a/decoupled_wbc/control/envs/g1/g1_body.py b/decoupled_wbc/control/envs/g1/g1_body.py new file mode 100644 index 0000000..913563c --- /dev/null +++ b/decoupled_wbc/control/envs/g1/g1_body.py @@ -0,0 +1,67 @@ +from typing import Any, Dict + +import gymnasium as gym +import numpy as np + +from decoupled_wbc.control.base.env import Env +from decoupled_wbc.control.envs.g1.utils.command_sender import BodyCommandSender +from 
decoupled_wbc.control.envs.g1.utils.state_processor import BodyStateProcessor + + +class G1Body(Env): + def __init__(self, config: Dict[str, Any]): + super().__init__() + self.body_state_processor = BodyStateProcessor(config=config) + self.body_command_sender = BodyCommandSender(config=config) + + def observe(self) -> dict[str, any]: + body_state = self.body_state_processor._prepare_low_state() # (1, 148) + assert body_state.shape == (1, 148) + body_q = body_state[ + 0, 7 : 7 + 12 + 3 + 7 + 7 + ] # leg (12) + waist (3) + left arm (7) + right arm (7) + body_dq = body_state[0, 42 : 42 + 12 + 3 + 7 + 7] + body_ddq = body_state[0, 112 : 112 + 12 + 3 + 7 + 7] + body_tau_est = body_state[0, 77 : 77 + 12 + 3 + 7 + 7] + floating_base_pose = body_state[0, 0:7] + floating_base_vel = body_state[0, 36:42] + floating_base_acc = body_state[0, 106:112] + torso_quat = body_state[0, 141:145] + torso_ang_vel = body_state[0, 145:148] + + return { + "body_q": body_q, + "body_dq": body_dq, + "body_ddq": body_ddq, + "body_tau_est": body_tau_est, + "floating_base_pose": floating_base_pose, + "floating_base_vel": floating_base_vel, + "floating_base_acc": floating_base_acc, + "torso_quat": torso_quat, + "torso_ang_vel": torso_ang_vel, + } + + def queue_action(self, action: dict[str, any]): + # action should contain body_q, body_dq, body_tau + self.body_command_sender.send_command( + action["body_q"], action["body_dq"], action["body_tau"] + ) + + def observation_space(self) -> gym.Space: + return gym.spaces.Dict( + { + "body_q": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(29,)), + "body_dq": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(29,)), + "floating_base_pose": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7,)), + "floating_base_vel": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(6,)), + } + ) + + def action_space(self) -> gym.Space: + return gym.spaces.Dict( + { + "body_q": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(29,)), + "body_dq": gym.spaces.Box(low=-np.inf, 
high=np.inf, shape=(29,)), + "body_tau": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(29,)), + } + ) diff --git a/decoupled_wbc/control/envs/g1/g1_env.py b/decoupled_wbc/control/envs/g1/g1_env.py new file mode 100644 index 0000000..db2ef93 --- /dev/null +++ b/decoupled_wbc/control/envs/g1/g1_env.py @@ -0,0 +1,324 @@ +from copy import deepcopy +from typing import Dict + +import gymnasium as gym +import numpy as np +from scipy.spatial.transform import Rotation as R + +from decoupled_wbc.control.base.humanoid_env import Hands, HumanoidEnv +from decoupled_wbc.control.envs.g1.g1_body import G1Body +from decoupled_wbc.control.envs.g1.g1_hand import G1ThreeFingerHand +from decoupled_wbc.control.envs.g1.sim.simulator_factory import SimulatorFactory, init_channel +from decoupled_wbc.control.envs.g1.utils.joint_safety import JointSafetyMonitor +from decoupled_wbc.control.robot_model.instantiation.g1 import instantiate_g1_robot_model +from decoupled_wbc.control.robot_model.robot_model import RobotModel +from decoupled_wbc.control.utils.ros_utils import ROSManager + + +class G1Env(HumanoidEnv): + def __init__( + self, + env_name: str = "default", + robot_model: RobotModel = None, + wbc_version: str = "v2", + config: Dict[str, any] = None, + **kwargs, + ): + super().__init__() + self.robot_model = deepcopy(robot_model) # need to cache FK results + self.config = config + + # Initialize safety monitor (visualization disabled) + self.safety_monitor = JointSafetyMonitor( + robot_model, enable_viz=False, env_type=self.config.get("ENV_TYPE", "real") + ) + self.last_obs = None + self.last_safety_ok = True # Track last safety status from queue_action + + init_channel(config=self.config) + + # Initialize body and hands + self._body = G1Body(config=self.config) + + self.with_hands = config.get("with_hands", False) + + # Gravity compensation settings + self.enable_gravity_compensation = config.get("enable_gravity_compensation", False) + self.gravity_compensation_joints = 
config.get("gravity_compensation_joints", ["arms"]) + + if self.enable_gravity_compensation: + print( + f"Gravity compensation enabled for joint groups: {self.gravity_compensation_joints}" + ) + if self.with_hands: + self._hands = Hands() + self._hands.left = G1ThreeFingerHand(is_left=True) + self._hands.right = G1ThreeFingerHand(is_left=False) + + # Initialize simulator if in simulation mode + self.use_sim = self.config.get("ENV_TYPE") == "sim" + + if self.use_sim: + # Create simulator using factory + + kwargs.update( + { + "onscreen": self.config.get("ENABLE_ONSCREEN", True), + "offscreen": self.config.get("ENABLE_OFFSCREEN", False), + } + ) + self.sim = SimulatorFactory.create_simulator( + config=self.config, + env_name=env_name, + wbc_version=wbc_version, + body_ik_solver_settings_type=kwargs.get("body_ik_solver_settings_type", "default"), + **kwargs, + ) + else: + self.sim = None + + # using the real robot + self.calibrate_hands() + + # Initialize ROS 2 node + self.ros_manager = ROSManager(node_name="g1_env") + self.ros_node = self.ros_manager.node + + self.delay_list = [] + self.visualize_delay = False + self.print_delay_interval = 100 + self.cnt = 0 + + def start_simulator(self): + # image publish disabled since the sim is running in a sub-thread + SimulatorFactory.start_simulator(self.sim, as_thread=True, enable_image_publish=False) + + def step_simulator(self): + sim_num_steps = int(self.config["REWARD_DT"] / self.config["SIMULATE_DT"]) + for _ in range(sim_num_steps): + self.sim.sim_env.sim_step() + self.sim.sim_env.update_viewer() + + def body(self) -> G1Body: + return self._body + + def hands(self) -> Hands: + if not self.with_hands: + raise RuntimeError( + "Hands not initialized. Use --with_hands True to enable hand functionality." 
+ ) + return self._hands + + def observe(self) -> Dict[str, any]: + # Get observations from body and hands + body_obs = self.body().observe() + + body_q = body_obs["body_q"] + body_dq = body_obs["body_dq"] + body_ddq = body_obs["body_ddq"] + body_tau_est = body_obs["body_tau_est"] + + if self.with_hands: + left_hand_obs = self.hands().left.observe() + right_hand_obs = self.hands().right.observe() + left_hand_q = left_hand_obs["hand_q"] + right_hand_q = right_hand_obs["hand_q"] + left_hand_dq = left_hand_obs["hand_dq"] + right_hand_dq = right_hand_obs["hand_dq"] + left_hand_ddq = left_hand_obs["hand_ddq"] + right_hand_ddq = right_hand_obs["hand_ddq"] + left_hand_tau_est = left_hand_obs["hand_tau_est"] + right_hand_tau_est = right_hand_obs["hand_tau_est"] + + # Body and hand joint measurements come in actuator order, so we need to convert them to joint order + whole_q = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_q, + left_hand_actuated_joint_values=left_hand_q, + right_hand_actuated_joint_values=right_hand_q, + ) + whole_dq = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_dq, + left_hand_actuated_joint_values=left_hand_dq, + right_hand_actuated_joint_values=right_hand_dq, + ) + whole_ddq = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_ddq, + left_hand_actuated_joint_values=left_hand_ddq, + right_hand_actuated_joint_values=right_hand_ddq, + ) + whole_tau_est = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_tau_est, + left_hand_actuated_joint_values=left_hand_tau_est, + right_hand_actuated_joint_values=right_hand_tau_est, + ) + else: + # Body and hand joint measurements come in actuator order, so we need to convert them to joint order + whole_q = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_q, + ) + whole_dq = 
self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_dq, + ) + whole_ddq = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_ddq, + ) + whole_tau_est = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_tau_est, + ) + + eef_obs = self.get_eef_obs(whole_q) + + obs = { + "q": whole_q, + "dq": whole_dq, + "ddq": whole_ddq, + "tau_est": whole_tau_est, + "floating_base_pose": body_obs["floating_base_pose"], + "floating_base_vel": body_obs["floating_base_vel"], + "floating_base_acc": body_obs["floating_base_acc"], + "wrist_pose": np.concatenate([eef_obs["left_wrist_pose"], eef_obs["right_wrist_pose"]]), + "torso_quat": body_obs["torso_quat"], + "torso_ang_vel": body_obs["torso_ang_vel"], + } + + if self.use_sim and self.sim: + obs.update(self.sim.get_privileged_obs()) + + # Store last observation for safety checking + self.last_obs = obs + + return obs + + @property + def observation_space(self) -> gym.Space: + # @todo: check if the low and high bounds are correct for body_obs. 
+ q_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.robot_model.num_dofs,)) + dq_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.robot_model.num_dofs,)) + ddq_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.robot_model.num_dofs,)) + tau_est_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.robot_model.num_dofs,)) + floating_base_pose_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7,)) + floating_base_vel_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(6,)) + floating_base_acc_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(6,)) + wrist_pose_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7 + 7,)) + return gym.spaces.Dict( + { + "floating_base_pose": floating_base_pose_space, + "floating_base_vel": floating_base_vel_space, + "floating_base_acc": floating_base_acc_space, + "q": q_space, + "dq": dq_space, + "ddq": ddq_space, + "tau_est": tau_est_space, + "wrist_pose": wrist_pose_space, + } + ) + + def queue_action(self, action: Dict[str, any]): + # Safety check + if self.last_obs is not None: + safety_result = self.safety_monitor.handle_violations(self.last_obs, action) + action = safety_result["action"] + + # Map action from joint order to actuator order + body_actuator_q = self.robot_model.get_body_actuated_joints(action["q"]) + + self.body().queue_action( + { + "body_q": body_actuator_q, + "body_dq": np.zeros_like(body_actuator_q), + "body_tau": np.zeros_like(body_actuator_q), + } + ) + + if self.with_hands: + left_hand_actuator_q = self.robot_model.get_hand_actuated_joints( + action["q"], side="left" + ) + right_hand_actuator_q = self.robot_model.get_hand_actuated_joints( + action["q"], side="right" + ) + self.hands().left.queue_action({"hand_q": left_hand_actuator_q}) + self.hands().right.queue_action({"hand_q": right_hand_actuator_q}) + + def action_space(self) -> gym.Space: + return gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.robot_model.num_dofs,)) + + def 
calibrate_hands(self): + """Calibrate the hand joint qpos if real robot""" + if self.with_hands: + print("calibrating left hand") + self.hands().left.calibrate_hand() + print("calibrating right hand") + self.hands().right.calibrate_hand() + else: + print("Skipping hand calibration - hands disabled") + + def set_ik_indicator(self, teleop_cmd): + """Set the IK indicators for the simulator""" + if self.config["SIMULATOR"] == "robocasa": + if "left_wrist" in teleop_cmd and "right_wrist" in teleop_cmd: + left_wrist_input_pose = teleop_cmd["left_wrist"] + right_wrist_input_pose = teleop_cmd["right_wrist"] + ik_wrapper = self.sim.env.env.unwrapped.env + ik_wrapper.set_target_poses_outside_env( + [left_wrist_input_pose, right_wrist_input_pose] + ) + else: + raise NotImplementedError("IK indicators are only implemented for robocasa simulator") + + def set_sync_mode(self, sync_mode: bool, steps_per_action: int = 4): + """When set to True, the simulator will wait for the action to be sent to it""" + if self.config["SIMULATOR"] == "robocasa": + self.sim.set_sync_mode(sync_mode, steps_per_action) + + def reset(self): + if self.sim: + self.sim.reset() + + def close(self): + if self.sim: + self.sim.close() + + def robot_model(self) -> RobotModel: + return self.robot_model + + def get_reward(self): + if self.sim: + return self.sim.get_reward() + + def reset_obj_pos(self): + if hasattr(self.sim, "base_env") and hasattr(self.sim.base_env, "reset_obj_pos"): + self.sim.base_env.reset_obj_pos() + + def get_eef_obs(self, q: np.ndarray) -> Dict[str, np.ndarray]: + self.robot_model.cache_forward_kinematics(q) + eef_obs = {} + for side in ["left", "right"]: + wrist_placement = self.robot_model.frame_placement( + self.robot_model.supplemental_info.hand_frame_names[side] + ) + wrist_pos, wrist_quat = wrist_placement.translation[:3], R.from_matrix( + wrist_placement.rotation + ).as_quat(scalar_first=True) + eef_obs[f"{side}_wrist_pose"] = np.concatenate([wrist_pos, wrist_quat]) + + return 
eef_obs + + def get_joint_safety_status(self) -> bool: + """Get current joint safety status from the last queue_action safety check. + + Returns: + bool: True if joints are safe (no shutdown required), False if unsafe + """ + return self.last_safety_ok + + def handle_keyboard_button(self, key): + # Only handles keyboard buttons for the mujoco simulator for now. + if self.use_sim and self.config.get("SIMULATOR", "mujoco") == "mujoco": + self.sim.handle_keyboard_button(key) + + +if __name__ == "__main__": + env = G1Env(robot_model=instantiate_g1_robot_model(), wbc_version="gear_wbc") + while True: + print(env.observe()) diff --git a/decoupled_wbc/control/envs/g1/g1_hand.py b/decoupled_wbc/control/envs/g1/g1_hand.py new file mode 100644 index 0000000..d2b8408 --- /dev/null +++ b/decoupled_wbc/control/envs/g1/g1_hand.py @@ -0,0 +1,89 @@ +import time + +import gymnasium as gym +import numpy as np + +from decoupled_wbc.control.base.env import Env +from decoupled_wbc.control.envs.g1.utils.command_sender import HandCommandSender +from decoupled_wbc.control.envs.g1.utils.state_processor import HandStateProcessor + + +class G1ThreeFingerHand(Env): + def __init__(self, is_left: bool = True): + super().__init__() + self.is_left = is_left + self.hand_state_processor = HandStateProcessor(is_left=self.is_left) + self.hand_command_sender = HandCommandSender(is_left=self.is_left) + self.hand_q_offset = np.zeros(7) + + def observe(self) -> dict[str, any]: + hand_state = self.hand_state_processor._prepare_low_state() # (1, 28) + assert hand_state.shape == (1, 28) + + # Apply offset to the hand state + hand_state[0, :7] = hand_state[0, :7] + self.hand_q_offset + + hand_q = hand_state[0, :7] + hand_dq = hand_state[0, 7:14] + hand_ddq = hand_state[0, 21:28] + hand_tau_est = hand_state[0, 14:21] + + # Return the state for this specific hand (left or right) + return { + "hand_q": hand_q, + "hand_dq": hand_dq, + "hand_ddq": hand_ddq, + "hand_tau_est": hand_tau_est, + } + + def 
queue_action(self, action: dict[str, any]): + # Apply offset to the hand target + action["hand_q"] = action["hand_q"] - self.hand_q_offset + + # action should contain hand_q + self.hand_command_sender.send_command(action["hand_q"]) + + def observation_space(self) -> gym.Space: + return gym.spaces.Dict( + { + "hand_q": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7,)), + "hand_dq": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7,)), + "hand_ddq": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7,)), + "hand_tau_est": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7,)), + } + ) + + def action_space(self) -> gym.Space: + return gym.spaces.Dict({"hand_q": gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7,))}) + + def calibrate_hand(self): + hand_obs = self.observe() + hand_q = hand_obs["hand_q"] + + hand_q_target = np.zeros_like(hand_q) + hand_q_target[0] = hand_q[0] + + # joint limit + hand_q0_upper_limit = np.deg2rad(60) # lower limit is -60 + + # move the figure counterclockwise until the limit + while True: + + if hand_q_target[0] - hand_q[0] < np.deg2rad(60): + hand_q_target[0] += np.deg2rad(10) + else: + self.hand_q_offset[0] = hand_q0_upper_limit - hand_q[0] + break + + self.queue_action({"hand_q": hand_q_target}) + + hand_obs = self.observe() + hand_q = hand_obs["hand_q"] + + time.sleep(0.1) + + print("done calibration, q0 offset (deg):", np.rad2deg(self.hand_q_offset[0])) + + # done calibrating, set target to zero + self.hand_q_target = np.zeros_like(hand_q) + self.queue_action({"hand_q": self.hand_q_target}) diff --git a/gr00t_wbc/control/envs/g1/sim/__init__.py b/decoupled_wbc/control/envs/g1/sim/__init__.py similarity index 100% rename from gr00t_wbc/control/envs/g1/sim/__init__.py rename to decoupled_wbc/control/envs/g1/sim/__init__.py diff --git a/decoupled_wbc/control/envs/g1/sim/base_sim.py b/decoupled_wbc/control/envs/g1/sim/base_sim.py new file mode 100644 index 0000000..4a92cc9 --- /dev/null +++ b/decoupled_wbc/control/envs/g1/sim/base_sim.py 
@@ -0,0 +1,772 @@ +import argparse +import pathlib +from pathlib import Path +import threading +from threading import Lock, Thread +from typing import Dict + +import mujoco +import mujoco.viewer +import numpy as np +import rclpy +from unitree_sdk2py.core.channel import ChannelFactoryInitialize +import yaml + +from decoupled_wbc.control.envs.g1.sim.image_publish_utils import ImagePublishProcess +from decoupled_wbc.control.envs.g1.sim.metric_utils import check_contact, check_height +from decoupled_wbc.control.envs.g1.sim.sim_utilts import get_subtree_body_names +from decoupled_wbc.control.envs.g1.sim.unitree_sdk2py_bridge import ElasticBand, UnitreeSdk2Bridge + +DECOUPLED_WBC_ROOT = Path(__file__).resolve().parent.parent.parent.parent.parent.parent + + +class DefaultEnv: + """Base environment class that handles simulation environment setup and step""" + + def __init__( + self, + config: Dict[str, any], + env_name: str = "default", + camera_configs: Dict[str, any] = {}, + onscreen: bool = False, + offscreen: bool = False, + enable_image_publish: bool = False, + ): + # global_view is only set up for this specific scene for now. 
+ if config["ROBOT_SCENE"] == "decoupled_wbc/control/robot_model/model_data/g1/scene_29dof.xml": + camera_configs["global_view"] = { + "height": 400, + "width": 400, + } + self.config = config + self.env_name = env_name + self.num_body_dof = self.config["NUM_JOINTS"] + self.num_hand_dof = self.config["NUM_HAND_JOINTS"] + self.sim_dt = self.config["SIMULATE_DT"] + self.obs = None + self.torques = np.zeros(self.num_body_dof + self.num_hand_dof * 2) + self.torque_limit = np.array(self.config["motor_effort_limit_list"]) + self.camera_configs = camera_configs + + # Thread safety lock + self.reward_lock = Lock() + + # Unitree bridge will be initialized by the simulator + self.unitree_bridge = None + + # Store display mode + self.onscreen = onscreen + + # Initialize scene (defined in subclasses) + self.init_scene() + self.last_reward = 0 + + # Setup offscreen rendering if needed + self.offscreen = offscreen + if self.offscreen: + self.init_renderers() + self.image_dt = self.config.get("IMAGE_DT", 0.033333) + self.image_publish_process = None + + def start_image_publish_subprocess(self, start_method: str = "spawn", camera_port: int = 5555): + # Use spawn method for better GIL isolation, or configured method + if len(self.camera_configs) == 0: + print( + "Warning: No camera configs provided, image publishing subprocess will not be started" + ) + return + start_method = self.config.get("MP_START_METHOD", "spawn") + self.image_publish_process = ImagePublishProcess( + camera_configs=self.camera_configs, + image_dt=self.image_dt, + zmq_port=camera_port, + start_method=start_method, + verbose=self.config.get("verbose", False), + ) + self.image_publish_process.start_process() + + def init_scene(self): + """Initialize the default robot scene""" + self.mj_model = mujoco.MjModel.from_xml_path( + str(pathlib.Path(DECOUPLED_WBC_ROOT) / self.config["ROBOT_SCENE"]) + ) + self.mj_data = mujoco.MjData(self.mj_model) + self.mj_model.opt.timestep = self.sim_dt + self.torso_index = 
mujoco.mj_name2id(self.mj_model, mujoco.mjtObj.mjOBJ_BODY, "torso_link") + self.root_body = "pelvis" + # Enable the elastic band + if self.config["ENABLE_ELASTIC_BAND"]: + self.elastic_band = ElasticBand() + if "g1" in self.config["ROBOT_TYPE"]: + if self.config["enable_waist"]: + self.band_attached_link = self.mj_model.body("pelvis").id + else: + self.band_attached_link = self.mj_model.body("torso_link").id + elif "h1" in self.config["ROBOT_TYPE"]: + self.band_attached_link = self.mj_model.body("torso_link").id + else: + self.band_attached_link = self.mj_model.body("base_link").id + + if self.onscreen: + self.viewer = mujoco.viewer.launch_passive( + self.mj_model, + self.mj_data, + key_callback=self.elastic_band.MujuocoKeyCallback, + show_left_ui=False, + show_right_ui=False, + ) + else: + mujoco.mj_forward(self.mj_model, self.mj_data) + self.viewer = None + else: + if self.onscreen: + self.viewer = mujoco.viewer.launch_passive( + self.mj_model, self.mj_data, show_left_ui=False, show_right_ui=False + ) + else: + mujoco.mj_forward(self.mj_model, self.mj_data) + self.viewer = None + + if self.viewer: + # viewer camera + self.viewer.cam.azimuth = 120 # Horizontal rotation in degrees + self.viewer.cam.elevation = -30 # Vertical tilt in degrees + self.viewer.cam.distance = 2.0 # Distance from camera to target + self.viewer.cam.lookat = np.array([0, 0, 0.5]) # Point the camera is looking at + + # Note that the actuator order is the same as the joint order in the mujoco model. 
+ self.body_joint_index = [] + self.left_hand_index = [] + self.right_hand_index = [] + for i in range(self.mj_model.njnt): + name = self.mj_model.joint(i).name + if any( + [ + part_name in name + for part_name in ["hip", "knee", "ankle", "waist", "shoulder", "elbow", "wrist"] + ] + ): + self.body_joint_index.append(i) + elif "left_hand" in name: + self.left_hand_index.append(i) + elif "right_hand" in name: + self.right_hand_index.append(i) + + assert len(self.body_joint_index) == self.config["NUM_JOINTS"] + assert len(self.left_hand_index) == self.config["NUM_HAND_JOINTS"] + assert len(self.right_hand_index) == self.config["NUM_HAND_JOINTS"] + + self.body_joint_index = np.array(self.body_joint_index) + self.left_hand_index = np.array(self.left_hand_index) + self.right_hand_index = np.array(self.right_hand_index) + + def init_renderers(self): + # Initialize camera renderers + self.renderers = {} + for camera_name, camera_config in self.camera_configs.items(): + renderer = mujoco.Renderer( + self.mj_model, height=camera_config["height"], width=camera_config["width"] + ) + self.renderers[camera_name] = renderer + + def compute_body_torques(self) -> np.ndarray: + """Compute body torques based on the current robot state""" + body_torques = np.zeros(self.num_body_dof) + if self.unitree_bridge is not None and self.unitree_bridge.low_cmd: + for i in range(self.unitree_bridge.num_body_motor): + if self.unitree_bridge.use_sensor: + body_torques[i] = ( + self.unitree_bridge.low_cmd.motor_cmd[i].tau + + self.unitree_bridge.low_cmd.motor_cmd[i].kp + * (self.unitree_bridge.low_cmd.motor_cmd[i].q - self.mj_data.sensordata[i]) + + self.unitree_bridge.low_cmd.motor_cmd[i].kd + * ( + self.unitree_bridge.low_cmd.motor_cmd[i].dq + - self.mj_data.sensordata[i + self.unitree_bridge.num_body_motor] + ) + ) + else: + body_torques[i] = ( + self.unitree_bridge.low_cmd.motor_cmd[i].tau + + self.unitree_bridge.low_cmd.motor_cmd[i].kp + * ( + self.unitree_bridge.low_cmd.motor_cmd[i].q + - 
self.mj_data.qpos[self.body_joint_index[i] + 7 - 1] + ) + + self.unitree_bridge.low_cmd.motor_cmd[i].kd + * ( + self.unitree_bridge.low_cmd.motor_cmd[i].dq + - self.mj_data.qvel[self.body_joint_index[i] + 6 - 1] + ) + ) + return body_torques + + def compute_hand_torques(self) -> np.ndarray: + """Compute hand torques based on the current robot state""" + left_hand_torques = np.zeros(self.num_hand_dof) + right_hand_torques = np.zeros(self.num_hand_dof) + if self.unitree_bridge is not None and self.unitree_bridge.low_cmd: + for i in range(self.unitree_bridge.num_hand_motor): + left_hand_torques[i] = ( + self.unitree_bridge.left_hand_cmd.motor_cmd[i].tau + + self.unitree_bridge.left_hand_cmd.motor_cmd[i].kp + * ( + self.unitree_bridge.left_hand_cmd.motor_cmd[i].q + - self.mj_data.qpos[self.left_hand_index[i] + 7 - 1] + ) + + self.unitree_bridge.left_hand_cmd.motor_cmd[i].kd + * ( + self.unitree_bridge.left_hand_cmd.motor_cmd[i].dq + - self.mj_data.qvel[self.left_hand_index[i] + 6 - 1] + ) + ) + right_hand_torques[i] = ( + self.unitree_bridge.right_hand_cmd.motor_cmd[i].tau + + self.unitree_bridge.right_hand_cmd.motor_cmd[i].kp + * ( + self.unitree_bridge.right_hand_cmd.motor_cmd[i].q + - self.mj_data.qpos[self.right_hand_index[i] + 7 - 1] + ) + + self.unitree_bridge.right_hand_cmd.motor_cmd[i].kd + * ( + self.unitree_bridge.right_hand_cmd.motor_cmd[i].dq + - self.mj_data.qvel[self.right_hand_index[i] + 6 - 1] + ) + ) + return np.concatenate((left_hand_torques, right_hand_torques)) + + def compute_body_qpos(self) -> np.ndarray: + """Compute body joint positions based on the current command""" + body_qpos = np.zeros(self.num_body_dof) + if self.unitree_bridge is not None and self.unitree_bridge.low_cmd: + for i in range(self.unitree_bridge.num_body_motor): + body_qpos[i] = self.unitree_bridge.low_cmd.motor_cmd[i].q + return body_qpos + + def compute_hand_qpos(self) -> np.ndarray: + """Compute hand joint positions based on the current command""" + hand_qpos = 
np.zeros(self.num_hand_dof * 2) + if self.unitree_bridge is not None and self.unitree_bridge.low_cmd: + for i in range(self.unitree_bridge.num_hand_motor): + hand_qpos[i] = self.unitree_bridge.left_hand_cmd.motor_cmd[i].q + hand_qpos[i + self.num_hand_dof] = self.unitree_bridge.right_hand_cmd.motor_cmd[i].q + return hand_qpos + + def prepare_obs(self) -> Dict[str, any]: + """Prepare observation dictionary from the current robot state""" + obs = {} + obs["floating_base_pose"] = self.mj_data.qpos[:7] + obs["floating_base_vel"] = self.mj_data.qvel[:6] + obs["floating_base_acc"] = self.mj_data.qacc[:6] + obs["secondary_imu_quat"] = self.mj_data.xquat[self.torso_index] + obs["secondary_imu_vel"] = self.mj_data.cvel[self.torso_index] + obs["body_q"] = self.mj_data.qpos[self.body_joint_index + 7 - 1] + obs["body_dq"] = self.mj_data.qvel[self.body_joint_index + 6 - 1] + obs["body_ddq"] = self.mj_data.qacc[self.body_joint_index + 6 - 1] + obs["body_tau_est"] = self.mj_data.actuator_force[self.body_joint_index - 1] + if self.num_hand_dof > 0: + obs["left_hand_q"] = self.mj_data.qpos[self.left_hand_index + 7 - 1] + obs["left_hand_dq"] = self.mj_data.qvel[self.left_hand_index + 6 - 1] + obs["left_hand_ddq"] = self.mj_data.qacc[self.left_hand_index + 6 - 1] + obs["left_hand_tau_est"] = self.mj_data.actuator_force[self.left_hand_index - 1] + obs["right_hand_q"] = self.mj_data.qpos[self.right_hand_index + 7 - 1] + obs["right_hand_dq"] = self.mj_data.qvel[self.right_hand_index + 6 - 1] + obs["right_hand_ddq"] = self.mj_data.qacc[self.right_hand_index + 6 - 1] + obs["right_hand_tau_est"] = self.mj_data.actuator_force[self.right_hand_index - 1] + obs["time"] = self.mj_data.time + return obs + + def sim_step(self): + self.obs = self.prepare_obs() + self.unitree_bridge.PublishLowState(self.obs) + if self.unitree_bridge.joystick: + self.unitree_bridge.PublishWirelessController() + if self.config["ENABLE_ELASTIC_BAND"]: + if self.elastic_band.enable: + # Get Cartesian pose and velocity 
of the band_attached_link + pose = np.concatenate( + [ + self.mj_data.xpos[self.band_attached_link], # link position in world + self.mj_data.xquat[ + self.band_attached_link + ], # link quaternion in world [w,x,y,z] + np.zeros(6), # placeholder for velocity + ] + ) + + # Get velocity in world frame + mujoco.mj_objectVelocity( + self.mj_model, + self.mj_data, + mujoco.mjtObj.mjOBJ_BODY, + self.band_attached_link, + pose[7:13], + 0, # 0 for world frame + ) + + # Reorder velocity from [ang, lin] to [lin, ang] + pose[7:10], pose[10:13] = pose[10:13], pose[7:10].copy() + self.mj_data.xfrc_applied[self.band_attached_link] = self.elastic_band.Advance(pose) + else: + # explicitly resetting the force when the band is not enabled + self.mj_data.xfrc_applied[self.band_attached_link] = np.zeros(6) + body_torques = self.compute_body_torques() + hand_torques = self.compute_hand_torques() + self.torques[self.body_joint_index - 1] = body_torques + if self.num_hand_dof > 0: + self.torques[self.left_hand_index - 1] = hand_torques[: self.num_hand_dof] + self.torques[self.right_hand_index - 1] = hand_torques[self.num_hand_dof :] + + self.torques = np.clip(self.torques, -self.torque_limit, self.torque_limit) + + if self.config["FREE_BASE"]: + self.mj_data.ctrl = np.concatenate((np.zeros(6), self.torques)) + else: + self.mj_data.ctrl = self.torques + mujoco.mj_step(self.mj_model, self.mj_data) + # self.check_self_collision() + + def kinematics_step(self): + """ + Run kinematics only: compute the qpos of the robot and directly set the qpos. + For debugging purposes. 
+ """ + if self.unitree_bridge is not None: + self.unitree_bridge.PublishLowState(self.prepare_obs()) + if self.unitree_bridge.joystick: + self.unitree_bridge.PublishWirelessController() + + if self.config["ENABLE_ELASTIC_BAND"]: + if self.elastic_band.enable: + # Get Cartesian pose and velocity of the band_attached_link + pose = np.concatenate( + [ + self.mj_data.xpos[self.band_attached_link], # link position in world + self.mj_data.xquat[ + self.band_attached_link + ], # link quaternion in world [w,x,y,z] + np.zeros(6), # placeholder for velocity + ] + ) + + # Get velocity in world frame + mujoco.mj_objectVelocity( + self.mj_model, + self.mj_data, + mujoco.mjtObj.mjOBJ_BODY, + self.band_attached_link, + pose[7:13], + 0, # 0 for world frame + ) + + # Reorder velocity from [ang, lin] to [lin, ang] + pose[7:10], pose[10:13] = pose[10:13], pose[7:10].copy() + + self.mj_data.xfrc_applied[self.band_attached_link] = self.elastic_band.Advance(pose) + else: + # explicitly resetting the force when the band is not enabled + self.mj_data.xfrc_applied[self.band_attached_link] = np.zeros(6) + + body_qpos = self.compute_body_qpos() # (num_body_dof,) + hand_qpos = self.compute_hand_qpos() # (num_hand_dof * 2,) + + self.mj_data.qpos[self.body_joint_index + 7 - 1] = body_qpos + self.mj_data.qpos[self.left_hand_index + 7 - 1] = hand_qpos[: self.num_hand_dof] + self.mj_data.qpos[self.right_hand_index + 7 - 1] = hand_qpos[self.num_hand_dof :] + + mujoco.mj_kinematics(self.mj_model, self.mj_data) + mujoco.mj_comPos(self.mj_model, self.mj_data) + + def apply_perturbation(self, key): + """Apply perturbation to the robot""" + # Add velocity perturbations in body frame + perturbation_x_body = 0.0 # forward/backward in body frame + perturbation_y_body = 0.0 # left/right in body frame + if key == "up": + perturbation_x_body = 1.0 # forward + elif key == "down": + perturbation_x_body = -1.0 # backward + elif key == "left": + perturbation_y_body = 1.0 # left + elif key == "right": + 
perturbation_y_body = -1.0 # right + + # Transform body frame velocity to world frame using MuJoCo's rotation + vel_body = np.array([perturbation_x_body, perturbation_y_body, 0.0]) + vel_world = np.zeros(3) + base_quat = self.mj_data.qpos[3:7] # [w, x, y, z] quaternion + + # Use MuJoCo's robust quaternion rotation (handles invalid quaternions automatically) + mujoco.mju_rotVecQuat(vel_world, vel_body, base_quat) + + # Apply to base linear velocity in world frame + self.mj_data.qvel[0] += vel_world[0] # world X velocity + self.mj_data.qvel[1] += vel_world[1] # world Y velocity + + # Update dynamics after velocity change + mujoco.mj_forward(self.mj_model, self.mj_data) + + def update_viewer(self): + if self.viewer is not None: + self.viewer.sync() + + def update_viewer_camera(self): + if self.viewer is not None: + if self.viewer.cam.type == mujoco.mjtCamera.mjCAMERA_TRACKING: + self.viewer.cam.type = mujoco.mjtCamera.mjCAMERA_FREE + else: + self.viewer.cam.type = mujoco.mjtCamera.mjCAMERA_TRACKING + + def update_reward(self): + """Calculate reward. Should be implemented by subclasses.""" + with self.reward_lock: + self.last_reward = 0 + + def get_reward(self): + """Thread-safe way to get the last calculated reward.""" + with self.reward_lock: + return self.last_reward + + def set_unitree_bridge(self, unitree_bridge): + """Set the unitree bridge from the simulator""" + self.unitree_bridge = unitree_bridge + + def get_privileged_obs(self): + """Get privileged observation. 
Should be implemented by subclasses.""" + return {} + + def update_render_caches(self): + """Update render cache and shared memory for subprocess.""" + render_caches = {} + for camera_name, camera_config in self.camera_configs.items(): + renderer = self.renderers[camera_name] + if "params" in camera_config: + renderer.update_scene(self.mj_data, camera=camera_config["params"]) + else: + renderer.update_scene(self.mj_data, camera=camera_name) + render_caches[camera_name + "_image"] = renderer.render() + + # Update shared memory if image publishing process is available + if self.image_publish_process is not None: + self.image_publish_process.update_shared_memory(render_caches) + + return render_caches + + def handle_keyboard_button(self, key): + if self.elastic_band is not None: + self.elastic_band.handle_keyboard_button(key) + + if key == "backspace": + self.reset() + if key == "v": + self.update_viewer_camera() + if key in ["up", "down", "left", "right"]: + self.apply_perturbation(key) + + def check_fall(self): + """Check if the robot has fallen""" + self.fall = False + if self.mj_data.qpos[2] < 0.2: + self.fall = True + print(f"Warning: Robot has fallen, height: {self.mj_data.qpos[2]:.3f} m") + + if self.fall: + self.reset() + + def check_self_collision(self): + """Check for self-collision of the robot""" + robot_bodies = get_subtree_body_names(self.mj_model, self.mj_model.body(self.root_body).id) + self_collision, contact_bodies = check_contact( + self.mj_model, self.mj_data, robot_bodies, robot_bodies, return_all_contact_bodies=True + ) + if self_collision: + print(f"Warning: Self-collision detected: {contact_bodies}") + return self_collision + + def reset(self): + mujoco.mj_resetData(self.mj_model, self.mj_data) + + +class CubeEnv(DefaultEnv): + """Environment with a cube object for pick and place tasks""" + + def __init__( + self, + config: Dict[str, any], + onscreen: bool = False, + offscreen: bool = False, + enable_image_publish: bool = False, + ): + # 
Override the robot scene + config = config.copy() # Create a copy to avoid modifying the original + config["ROBOT_SCENE"] = "decoupled_wbc/control/robot_model/model_data/g1/pnp_cube_43dof.xml" + super().__init__(config, "cube", {}, onscreen, offscreen, enable_image_publish) + + def update_reward(self): + """Calculate reward based on gripper contact with cube and cube height""" + right_hand_body = [ + "right_hand_thumb_2_link", + "right_hand_middle_1_link", + "right_hand_index_1_link", + ] + gripper_cube_contact = check_contact( + self.mj_model, self.mj_data, right_hand_body, "cube_body" + ) + cube_lifted = check_height(self.mj_model, self.mj_data, "cube", 0.85, 2.0) + + with self.reward_lock: + self.last_reward = gripper_cube_contact & cube_lifted + + +class BoxEnv(DefaultEnv): + """Environment with a box object for manipulation tasks""" + + def __init__( + self, + config: Dict[str, any], + onscreen: bool = False, + offscreen: bool = False, + enable_image_publish: bool = False, + ): + # Override the robot scene + config = config.copy() # Create a copy to avoid modifying the original + config["ROBOT_SCENE"] = "decoupled_wbc/control/robot_model/model_data/g1/lift_box_43dof.xml" + super().__init__(config, "box", {}, onscreen, offscreen, enable_image_publish) + + def reward(self): + """Calculate reward based on gripper contact with cube and cube height""" + left_hand_body = [ + "left_hand_thumb_2_link", + "left_hand_middle_1_link", + "left_hand_index_1_link", + ] + right_hand_body = [ + "right_hand_thumb_2_link", + "right_hand_middle_1_link", + "right_hand_index_1_link", + ] + gripper_box_contact = check_contact(self.mj_model, self.mj_data, left_hand_body, "box_body") + gripper_box_contact &= check_contact( + self.mj_model, self.mj_data, right_hand_body, "box_body" + ) + box_lifted = check_height(self.mj_model, self.mj_data, "box", 0.92, 2.0) + + print("gripper_box_contact: ", gripper_box_contact, "box_lifted: ", box_lifted) + + with self.reward_lock: + 
self.last_reward = gripper_box_contact & box_lifted + return self.last_reward + + +class BottleEnv(DefaultEnv): + """Environment with a cylinder object for manipulation tasks""" + + def __init__( + self, + config: Dict[str, any], + onscreen: bool = False, + offscreen: bool = False, + enable_image_publish: bool = False, + ): + # Override the robot scene + config = config.copy() # Create a copy to avoid modifying the original + config["ROBOT_SCENE"] = "decoupled_wbc/control/robot_model/model_data/g1/pnp_bottle_43dof.xml" + camera_configs = { + "egoview": { + "height": 400, + "width": 400, + }, + } + super().__init__( + config, "cylinder", camera_configs, onscreen, offscreen, enable_image_publish + ) + + self.bottle_body = self.mj_model.body("bottle_body") + self.bottle_geom = self.mj_model.geom("bottle") + + if self.viewer is not None: + self.viewer.cam.type = mujoco.mjtCamera.mjCAMERA_FIXED + self.viewer.cam.fixedcamid = self.mj_model.camera("egoview").id + + def update_reward(self): + """Calculate reward based on gripper contact with cylinder and cylinder height""" + pass + + def get_privileged_obs(self): + obs_pos = self.mj_data.xpos[self.bottle_body.id] + obs_quat = self.mj_data.xquat[self.bottle_body.id] + return {"bottle_pos": obs_pos, "bottle_quat": obs_quat} + + +class BaseSimulator: + """Base simulator class that handles initialization and running of simulations""" + + def __init__(self, config: Dict[str, any], env_name: str = "default", **kwargs): + self.config = config + self.env_name = env_name + + # Initialize ROS 2 node + if not rclpy.ok(): + rclpy.init() + self.node = rclpy.create_node("sim_mujoco") + self.thread = threading.Thread(target=rclpy.spin, args=(self.node,), daemon=True) + self.thread.start() + else: + self.thread = None + executor = rclpy.get_global_executor() + self.node = executor.get_nodes()[0] # will only take the first node + + # Create rate objects for different update frequencies + self.sim_dt = self.config["SIMULATE_DT"] + 
self.reward_dt = self.config.get("REWARD_DT", 0.02) + self.image_dt = self.config.get("IMAGE_DT", 0.033333) + self.viewer_dt = self.config.get("VIEWER_DT", 0.02) + self.rate = self.node.create_rate(1 / self.sim_dt) + + # Create the appropriate environment based on name + if env_name == "default": + self.sim_env = DefaultEnv(config, env_name, **kwargs) + elif env_name == "pnp_cube": + self.sim_env = CubeEnv(config, **kwargs) + elif env_name == "lift_box": + self.sim_env = BoxEnv(config, **kwargs) + elif env_name == "pnp_bottle": + self.sim_env = BottleEnv(config, **kwargs) + else: + raise ValueError(f"Invalid environment name: {env_name}") + + # Initialize the DDS communication layer - should be safe to call multiple times + + try: + if self.config.get("INTERFACE", None): + ChannelFactoryInitialize(self.config["DOMAIN_ID"], self.config["INTERFACE"]) + else: + ChannelFactoryInitialize(self.config["DOMAIN_ID"]) + except Exception as e: + # If it fails because it's already initialized, that's okay + print(f"Note: Channel factory initialization attempt: {e}") + + # Initialize the unitree bridge and pass it to the environment + self.init_unitree_bridge() + self.sim_env.set_unitree_bridge(self.unitree_bridge) + + # Initialize additional components + self.init_subscriber() + self.init_publisher() + + self.sim_thread = None + + def start_as_thread(self): + # Create simulation thread + self.sim_thread = Thread(target=self.start) + self.sim_thread.start() + + def start_image_publish_subprocess(self, start_method: str = "spawn", camera_port: int = 5555): + """Start the image publish subprocess""" + self.sim_env.start_image_publish_subprocess(start_method, camera_port) + + def init_subscriber(self): + """Initialize subscribers. Can be overridden by subclasses.""" + pass + + def init_publisher(self): + """Initialize publishers. 
Can be overridden by subclasses.""" + pass + + def init_unitree_bridge(self): + """Initialize the unitree SDK bridge""" + self.unitree_bridge = UnitreeSdk2Bridge(self.config) + if self.config["USE_JOYSTICK"]: + self.unitree_bridge.SetupJoystick( + device_id=self.config["JOYSTICK_DEVICE"], js_type=self.config["JOYSTICK_TYPE"] + ) + + def start(self): + """Main simulation loop""" + sim_cnt = 0 + + try: + while ( + self.sim_env.viewer and self.sim_env.viewer.is_running() + ) or self.sim_env.viewer is None: + # Run simulation step + self.sim_env.sim_step() + + # Update viewer at viewer rate + if sim_cnt % int(self.viewer_dt / self.sim_dt) == 0: + self.sim_env.update_viewer() + + # Calculate reward at reward rate + if sim_cnt % int(self.reward_dt / self.sim_dt) == 0: + self.sim_env.update_reward() + + # Update render caches at image rate + if sim_cnt % int(self.image_dt / self.sim_dt) == 0: + self.sim_env.update_render_caches() + + # Sleep to maintain correct rate + self.rate.sleep() + + sim_cnt += 1 + except rclpy.exceptions.ROSInterruptException: + # This is expected when ROS shuts down - exit cleanly + pass + except Exception: + self.close() + + def __del__(self): + """Clean up resources when simulator is deleted""" + self.close() + + def reset(self): + """Reset the simulation. Can be overridden by subclasses.""" + self.sim_env.reset() + + def close(self): + """Close the simulation. 
Can be overridden by subclasses.""" + try: + # Stop image publishing subprocess + if self.sim_env.image_publish_process is not None: + self.sim_env.image_publish_process.stop() + + # Close viewer + if hasattr(self.sim_env, "viewer") and self.sim_env.viewer is not None: + self.sim_env.viewer.close() + + # Shutdown ROS + if rclpy.ok(): + rclpy.shutdown() + except Exception as e: + print(f"Warning during close: {e}") + + def get_privileged_obs(self): + obs = self.sim_env.get_privileged_obs() + # TODO: add ros2 topic to get privileged obs + return obs + + def handle_keyboard_button(self, key): + # Only handles keyboard buttons for default env. + if self.env_name == "default": + self.sim_env.handle_keyboard_button(key) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Robot") + parser.add_argument( + "--config", + type=str, + default="./decoupled_wbc/control/main/teleop/configs/g1_29dof_gear_wbc.yaml", + help="config file", + ) + args = parser.parse_args() + + with open(args.config, "r") as file: + config = yaml.load(file, Loader=yaml.FullLoader) + + if config.get("INTERFACE", None): + ChannelFactoryInitialize(config["DOMAIN_ID"], config["INTERFACE"]) + else: + ChannelFactoryInitialize(config["DOMAIN_ID"]) + + simulation = BaseSimulator(config) + simulation.start_as_thread() diff --git a/decoupled_wbc/control/envs/g1/sim/image_publish_utils.py b/decoupled_wbc/control/envs/g1/sim/image_publish_utils.py new file mode 100644 index 0000000..2704ed6 --- /dev/null +++ b/decoupled_wbc/control/envs/g1/sim/image_publish_utils.py @@ -0,0 +1,256 @@ +import multiprocessing as mp +from multiprocessing import shared_memory +import time +from typing import Any, Dict + +import numpy as np + +from decoupled_wbc.control.sensor.sensor_server import ImageMessageSchema, SensorServer + + +def get_multiprocessing_info(verbose: bool = True): + """Get information about multiprocessing start methods""" + + if verbose: + print(f"Available start methods: 
{mp.get_all_start_methods()}") + return mp.get_start_method() + + +class ImagePublishProcess: + """Subprocess for publishing images using shared memory and ZMQ""" + + def __init__( + self, + camera_configs: Dict[str, Any], + image_dt: float, + zmq_port: int = 5555, + start_method: str = "spawn", + verbose: bool = False, + ): + self.camera_configs = camera_configs + self.image_dt = image_dt + self.zmq_port = zmq_port + self.verbose = verbose + self.shared_memory_blocks = {} + self.shared_memory_info = {} + self.process = None + + # Use specific context to avoid global state pollution + self.mp_context = mp.get_context(start_method) + if self.verbose: + print(f"Using multiprocessing context: {start_method}") + + self.stop_event = self.mp_context.Event() + self.data_ready_event = self.mp_context.Event() + + # Ensure events start in correct state + self.stop_event.clear() + self.data_ready_event.clear() + + if self.verbose: + print(f"Initial stop_event state: {self.stop_event.is_set()}") + print(f"Initial data_ready_event state: {self.data_ready_event.is_set()}") + + # Calculate shared memory requirements for each camera + for camera_name, camera_config in camera_configs.items(): + height = camera_config["height"] + width = camera_config["width"] + # RGB image: height * width * 3 (uint8) + size = height * width * 3 + + # Create shared memory block + shm = shared_memory.SharedMemory(create=True, size=size) + self.shared_memory_blocks[camera_name] = shm + self.shared_memory_info[camera_name] = { + "name": shm.name, + "size": size, + "shape": (height, width, 3), + "dtype": np.uint8, + } + + def start_process(self): + """Start the image publishing subprocess""" + if self.verbose: + print(f"Starting subprocess with stop_event state: {self.stop_event.is_set()}") + self.process = self.mp_context.Process( + target=self._image_publish_worker, + args=( + self.shared_memory_info, + self.image_dt, + self.zmq_port, + self.stop_event, + self.data_ready_event, + self.verbose, + ), + 
) + self.process.start() + if self.verbose: + print(f"Subprocess started, PID: {self.process.pid}") + + def update_shared_memory(self, render_caches: Dict[str, np.ndarray]): + """Update shared memory with new rendered images""" + images_updated = 0 + for camera_name in self.camera_configs.keys(): + image_key = f"{camera_name}_image" + if image_key in render_caches: + image = render_caches[image_key] + + # Ensure image is uint8 and has correct shape + if image.dtype != np.uint8: + image = (image * 255).astype(np.uint8) + + # Get shared memory array + shm = self.shared_memory_blocks[camera_name] + shared_array = np.ndarray( + self.shared_memory_info[camera_name]["shape"], + dtype=self.shared_memory_info[camera_name]["dtype"], + buffer=shm.buf, + ) + + # Copy image data to shared memory atomically + np.copyto(shared_array, image) + images_updated += 1 + + # Signal that new data is ready only after all images are written + if images_updated > 0: + if self.verbose: + print(f"Main process: Updated {images_updated} images, setting data_ready_event") + self.data_ready_event.set() + elif self.verbose: + print( + "Main process: No images to update. 
" + "please check if camera configs are provided and the renderer is properly initialized" + ) + + def stop(self): + """Stop the image publishing subprocess""" + if self.verbose: + print("Stopping image publishing subprocess...") + self.stop_event.set() + + if self.process and self.process.is_alive(): + # Give the process time to clean up gracefully + self.process.join(timeout=5) + if self.process.is_alive(): + if self.verbose: + print("Subprocess didn't stop gracefully, terminating...") + self.process.terminate() + self.process.join(timeout=2) + if self.process.is_alive(): + if self.verbose: + print("Force killing subprocess...") + self.process.kill() + self.process.join() + + # Clean up shared memory + for camera_name, shm in self.shared_memory_blocks.items(): + try: + shm.close() + shm.unlink() + if self.verbose: + print(f"Cleaned up shared memory for {camera_name}") + except Exception as e: + if self.verbose: + print(f"Warning: Failed to cleanup shared memory for {camera_name}: {e}") + + self.shared_memory_blocks.clear() + if self.verbose: + print("Image publishing subprocess stopped and cleaned up") + + @staticmethod + def _image_publish_worker( + shared_memory_info, image_dt, zmq_port, stop_event, data_ready_event, verbose + ): + """Worker function that runs in the subprocess""" + if verbose: + print(f"Worker started! 
PID: {__import__('os').getpid()}") + print(f"Worker stop_event state at start: {stop_event.is_set()}") + print(f"Worker data_ready_event state at start: {data_ready_event.is_set()}") + + try: + # Initialize ZMQ sensor server + sensor_server = SensorServer() + sensor_server.start_server(port=zmq_port) + + # Connect to shared memory blocks + shared_arrays = {} + shm_blocks = {} + for camera_name, info in shared_memory_info.items(): + shm = shared_memory.SharedMemory(name=info["name"]) + shm_blocks[camera_name] = shm + shared_arrays[camera_name] = np.ndarray( + info["shape"], dtype=info["dtype"], buffer=shm.buf + ) + + print( + f"Image publishing subprocess started with {len(shared_arrays)} cameras on ZMQ port {zmq_port}" + ) + + loop_count = 0 + last_data_time = time.time() + + while not stop_event.is_set(): + loop_count += 1 + + # Wait for new data with shorter timeout for better responsiveness + timeout = min(image_dt, 0.1) # Max 100ms timeout + data_available = data_ready_event.wait(timeout=timeout) + + current_time = time.time() + + if data_available: + data_ready_event.clear() + if loop_count % 50 == 0: + print("Image publish frequency: ", 1 / (current_time - last_data_time)) + last_data_time = current_time + + # Collect all camera images and serialize them + try: + from decoupled_wbc.control.sensor.sensor_server import ImageUtils + + # Copy all images atomically at once + image_copies = {name: arr.copy() for name, arr in shared_arrays.items()} + + # Create message with all camera images + message_dict = { + "images": image_copies, + "timestamps": {name: current_time for name in image_copies.keys()}, + } + + # Create ImageMessageSchema and serialize + image_msg = ImageMessageSchema( + timestamps=message_dict.get("timestamps"), + images=message_dict.get("images", None), + ) + + # Serialize and send via ZMQ + serialized_data = image_msg.serialize() + + # Add individual camera images to the message + for camera_name, image_copy in image_copies.items(): + 
serialized_data[f"{camera_name}"] = ImageUtils.encode_image(image_copy) + + sensor_server.send_message(serialized_data) + + except Exception as e: + print(f"Error publishing images: {e}") + + elif verbose and loop_count % 10 == 0: + print(f"Subprocess: Still waiting for data... (iteration {loop_count})") + + # Small sleep to prevent busy waiting when no data + if not data_available: + time.sleep(0.001) + + except KeyboardInterrupt: + print("Image publisher interrupted by user") + finally: + # Clean up + try: + for shm in shm_blocks.values(): + shm.close() + sensor_server.stop_server() + except Exception as e: + print(f"Error during subprocess cleanup: {e}") + if verbose: + print("Image publish subprocess stopped") diff --git a/decoupled_wbc/control/envs/g1/sim/metric_utils.py b/decoupled_wbc/control/envs/g1/sim/metric_utils.py new file mode 100644 index 0000000..759444b --- /dev/null +++ b/decoupled_wbc/control/envs/g1/sim/metric_utils.py @@ -0,0 +1,71 @@ +from typing import List, Tuple + +import mujoco + +from decoupled_wbc.control.envs.g1.sim.sim_utilts import get_body_geom_ids + + +def check_contact( + mj_model: mujoco.MjModel, + mj_data: mujoco.MjData, + bodies_1: List[str] | str, + bodies_2: List[str] | str, + return_all_contact_bodies: bool = False, +) -> Tuple[bool, List[Tuple[str, str]]] | bool: + """ + Finds contact between two body groups. Any geom in the body is considered to be in contact. + Args: + mj_model (MujocoModel): Current simulation object + mj_data (MjData): Current simulation data + bodies_1 (str or list of int): an individual body name or list of body names. + bodies_2 (str or list of int): another individual body name or list of body names. + Returns: + bool: True if any body in @bodies_1 is in contact with any body in @bodies_2. 
+ """ + if isinstance(bodies_1, str): + bodies_1 = [bodies_1] + if isinstance(bodies_2, str): + bodies_2 = [bodies_2] + + geoms_1 = [get_body_geom_ids(mj_model, mj_model.body(g).id) for g in bodies_1] + geoms_1 = [g for geom_list in geoms_1 for g in geom_list] + geoms_2 = [get_body_geom_ids(mj_model, mj_model.body(g).id) for g in bodies_2] + geoms_2 = [g for geom_list in geoms_2 for g in geom_list] + contact_bodies = [] + for i in range(mj_data.ncon): + contact = mj_data.contact[i] + # check contact geom in geoms + c1_in_g1 = contact.geom1 in geoms_1 + c2_in_g2 = contact.geom2 in geoms_2 if geoms_2 is not None else True + # check contact geom in geoms (flipped) + c2_in_g1 = contact.geom2 in geoms_1 + c1_in_g2 = contact.geom1 in geoms_2 if geoms_2 is not None else True + if (c1_in_g1 and c2_in_g2) or (c1_in_g2 and c2_in_g1): + contact_bodies.append( + ( + mj_model.body(mj_model.geom(contact.geom1).bodyid).name, + mj_model.body(mj_model.geom(contact.geom2).bodyid).name, + ) + ) + if not return_all_contact_bodies: + break + if return_all_contact_bodies: + return len(contact_bodies) > 0, set(contact_bodies) + else: + return len(contact_bodies) > 0 + + +def check_height( + mj_model: mujoco.MjModel, + mj_data: mujoco.MjData, + geom_name: str, + lower_bound: float = -float("inf"), + upper_bound: float = float("inf"), +): + """ + Checks if the height of a geom is greater than a given height. 
+ """ + geom_id = mj_model.geom(geom_name).id + return ( + mj_data.geom_xpos[geom_id][2] < upper_bound and mj_data.geom_xpos[geom_id][2] > lower_bound + ) diff --git a/decoupled_wbc/control/envs/g1/sim/robocasa_sim.py b/decoupled_wbc/control/envs/g1/sim/robocasa_sim.py new file mode 100644 index 0000000..021a512 --- /dev/null +++ b/decoupled_wbc/control/envs/g1/sim/robocasa_sim.py @@ -0,0 +1,63 @@ +from typing import Any, Dict, Tuple + +from unitree_sdk2py.core.channel import ChannelFactoryInitialize + +from decoupled_wbc.control.envs.g1.sim.unitree_sdk2py_bridge import UnitreeSdk2Bridge +from decoupled_wbc.control.envs.robocasa.async_env_server import RoboCasaEnvServer +from decoupled_wbc.control.robot_model.instantiation import get_robot_type_and_model + + +class RoboCasaG1EnvServer(RoboCasaEnvServer): + def __init__( + self, env_name: str, wbc_config: Dict[str, Any], env_kwargs: Dict[str, Any], **kwargs + ): + if UnitreeSdk2Bridge is None: + raise ImportError("UnitreeSdk2Bridge is required for RoboCasaG1EnvServer") + self.wbc_config = wbc_config + _, robot_model = get_robot_type_and_model( + "G1", + enable_waist_ik=wbc_config["enable_waist"], + ) + if env_kwargs.get("camera_names", None) is None: + env_kwargs["camera_names"] = [ + "robot0_oak_egoview", + "robot0_oak_left_monoview", + "robot0_oak_right_monoview", + "robot0_rs_tppview", + ] + if env_kwargs.get("render_camera", None) is None: + if env_kwargs.get("renderer", "mjviewer") == "mjviewer": + env_kwargs["render_camera"] = "robot0_oak_egoview" + else: + env_kwargs["render_camera"] = [ + "robot0_oak_egoview", + "robot0_rs_tppview", + ] + + super().__init__(env_name, "G1", robot_model, env_kwargs=env_kwargs, **kwargs) + + def init_channel(self): + + try: + if self.wbc_config.get("INTERFACE", None): + ChannelFactoryInitialize(self.wbc_config["DOMAIN_ID"], self.wbc_config["INTERFACE"]) + else: + ChannelFactoryInitialize(self.wbc_config["DOMAIN_ID"]) + except Exception: + # If it fails because it's already 
initialized, that's okay + pass + + self.channel_bridge = UnitreeSdk2Bridge(config=self.wbc_config) + + def publish_obs(self): + # with self.cache_lock: + obs = self.caches["obs"] + self.channel_bridge.PublishLowState(obs) + + def get_action(self) -> Tuple[Dict[str, Any], bool, bool]: + q, ready, is_new_action = self.channel_bridge.GetAction() + return {"q": q}, ready, is_new_action + + def reset(self): + super().reset() + self.channel_bridge.reset() diff --git a/gr00t_wbc/control/envs/g1/sim/sim_utilts.py b/decoupled_wbc/control/envs/g1/sim/sim_utilts.py similarity index 100% rename from gr00t_wbc/control/envs/g1/sim/sim_utilts.py rename to decoupled_wbc/control/envs/g1/sim/sim_utilts.py diff --git a/decoupled_wbc/control/envs/g1/sim/simulator_factory.py b/decoupled_wbc/control/envs/g1/sim/simulator_factory.py new file mode 100644 index 0000000..857f4b1 --- /dev/null +++ b/decoupled_wbc/control/envs/g1/sim/simulator_factory.py @@ -0,0 +1,144 @@ +import time +from typing import Any, Dict + +from unitree_sdk2py.core.channel import ChannelFactoryInitialize + +from decoupled_wbc.control.envs.g1.sim.base_sim import BaseSimulator + + +def init_channel(config: Dict[str, Any]) -> None: + """ + Initialize the communication channel for simulator/robot communication. + + Args: + config: Configuration dictionary containing DOMAIN_ID and optionally INTERFACE + """ + if config.get("INTERFACE", None): + ChannelFactoryInitialize(config["DOMAIN_ID"], config["INTERFACE"]) + else: + ChannelFactoryInitialize(config["DOMAIN_ID"]) + + +class SimulatorFactory: + """Factory class for creating different types of simulators.""" + + @staticmethod + def create_simulator(config: Dict[str, Any], env_name: str = "default", **kwargs): + """ + Create a simulator based on the configuration. 
+ + Args: + config: Configuration dictionary containing SIMULATOR type + env_name: Environment name + **kwargs: Additional keyword arguments for specific simulators + """ + simulator_type = config.get("SIMULATOR", "mujoco") + if simulator_type == "mujoco": + return SimulatorFactory._create_mujoco_simulator(config, env_name, **kwargs) + elif simulator_type == "robocasa": + return SimulatorFactory._create_robocasa_simulator(config, env_name, **kwargs) + else: + print( + f"Warning: Invalid simulator type: {simulator_type}. " + "If you are using run_sim_loop, please ignore this warning." + ) + return None + + @staticmethod + def _create_mujoco_simulator(config: Dict[str, Any], env_name: str = "default", **kwargs): + """Create a MuJoCo simulator instance.""" + env_kwargs = dict( + onscreen=kwargs.pop("onscreen", True), + offscreen=kwargs.pop("offscreen", False), + enable_image_publish=kwargs.get("enable_image_publish", False), + ) + return BaseSimulator(config=config, env_name=env_name, **env_kwargs) + + @staticmethod + def _create_robocasa_simulator(config: Dict[str, Any], env_name: str = "default", **kwargs): + """Create a RoboCasa simulator instance.""" + from decoupled_wbc.control.envs.g1.sim.robocasa_sim import RoboCasaG1EnvServer + from decoupled_wbc.control.envs.robocasa.utils.controller_utils import ( + update_robosuite_controller_configs, + ) + from decoupled_wbc.control.envs.robocasa.utils.sim_utils import change_simulation_timestep + + change_simulation_timestep(config["SIMULATE_DT"]) + + # Use default environment if not specified + if env_name == "default": + env_name = "GroundOnly" + + # Get or create controller configurations + controller_configs = kwargs.get("controller_configs") + if controller_configs is None: + wbc_version = kwargs.get("wbc_version", "gear_wbc") + controller_configs = update_robosuite_controller_configs("G1", wbc_version) + + # Build environment kwargs + env_kwargs = dict( + onscreen=kwargs.pop("onscreen", True), + 
offscreen=kwargs.pop("offscreen", False), + camera_names=kwargs.pop("camera_names", None), + camera_heights=kwargs.pop("camera_heights", None), + camera_widths=kwargs.pop("camera_widths", None), + control_freq=kwargs.pop("control_freq", 50), + controller_configs=controller_configs, + ik_indicator=kwargs.pop("ik_indicator", False), + randomize_cameras=kwargs.pop("randomize_cameras", True), + ) + + kwargs.update( + { + "verbose": config.pop("verbose", False), + "sim_freq": 1 / config.pop("SIMULATE_DT"), + } + ) + + return RoboCasaG1EnvServer( + env_name=env_name, + wbc_config=config, + env_kwargs=env_kwargs, + **kwargs, + ) + + @staticmethod + def start_simulator( + simulator, + as_thread: bool = True, + enable_image_publish: bool = False, + mp_start_method: str = "spawn", + camera_port: int = 5555, + ): + """ + Start the simulator either as a thread or as a separate process. + + Args: + simulator: The simulator instance to start + config: Configuration dictionary + as_thread: If True, start as thread; if False, start as subprocess + enable_offscreen: If True and not as_thread, start image publishing + """ + + if as_thread: + simulator.start_as_thread() + else: + # Wrap in try-except to make sure simulator is properly closed upon exit. 
+ try: + if enable_image_publish: + simulator.start_image_publish_subprocess( + start_method=mp_start_method, + camera_port=camera_port, + ) + time.sleep(1) + simulator.start() + except KeyboardInterrupt: + print("+++++Simulator interrupted by user.") + except Exception as e: + print(f"++++error in simulator: {e} ++++") + finally: + print("++++closing simulator ++++") + simulator.close() + + # Allow simulator to initialize + time.sleep(1) diff --git a/gr00t_wbc/control/envs/g1/sim/unitree_sdk2py_bridge.py b/decoupled_wbc/control/envs/g1/sim/unitree_sdk2py_bridge.py similarity index 100% rename from gr00t_wbc/control/envs/g1/sim/unitree_sdk2py_bridge.py rename to decoupled_wbc/control/envs/g1/sim/unitree_sdk2py_bridge.py diff --git a/gr00t_wbc/control/envs/g1/utils/__init__.py b/decoupled_wbc/control/envs/g1/utils/__init__.py similarity index 100% rename from gr00t_wbc/control/envs/g1/utils/__init__.py rename to decoupled_wbc/control/envs/g1/utils/__init__.py diff --git a/gr00t_wbc/control/envs/g1/utils/command_sender.py b/decoupled_wbc/control/envs/g1/utils/command_sender.py similarity index 100% rename from gr00t_wbc/control/envs/g1/utils/command_sender.py rename to decoupled_wbc/control/envs/g1/utils/command_sender.py diff --git a/decoupled_wbc/control/envs/g1/utils/joint_safety.py b/decoupled_wbc/control/envs/g1/utils/joint_safety.py new file mode 100644 index 0000000..06c9134 --- /dev/null +++ b/decoupled_wbc/control/envs/g1/utils/joint_safety.py @@ -0,0 +1,534 @@ +"""Joint safety monitor for G1 robot. + +This module implements safety monitoring for arm and finger joint velocities using +joint groups defined in the robot model's supplemental info. Leg joints are not monitored. 
+""" + +from datetime import datetime +import sys +import time +from typing import Dict, List, Optional, Tuple + +import numpy as np + +from decoupled_wbc.data.viz.rerun_viz import RerunViz + + +class JointSafetyMonitor: + """Monitor joint velocities for G1 robot arms and hands.""" + + # Velocity limits in rad/s + ARM_VELOCITY_LIMIT = 6.0 # rad/s for arm joints + HAND_VELOCITY_LIMIT = 50.0 # rad/s for finger joints + + def __init__(self, robot_model, enable_viz: bool = False, env_type: str = "real"): + """Initialize joint safety monitor. + + Args: + robot_model: The robot model containing joint information + enable_viz: If True, enable rerun visualization (default False) + env_type: Environment type - "sim" or "real" (default "real") + """ + self.robot_model = robot_model + self.safety_margin = 1.0 # Hardcoded safety margin + self.enable_viz = enable_viz + self.env_type = env_type + + # Startup ramping parameters + self.control_frequency = 50 # Hz, hardcoded from run_g1_control_loop.py + self.ramp_duration_steps = int(2.0 * self.control_frequency) # 2 seconds * 50Hz = 100 steps + self.startup_counter = 0 + self.initial_positions = None + self.startup_complete = False + + # Initialize velocity and position limits for monitored joints + self.velocity_limits = {} + self.position_limits = {} + self._initialize_limits() + + # Track violations for reporting + self.violations = [] + + # Initialize visualization + self.right_arm_indices = None + self.right_arm_joint_names = [] + self.left_arm_indices = None + self.left_arm_joint_names = [] + self.right_hand_indices = None + self.right_hand_joint_names = [] + self.left_hand_indices = None + self.left_hand_joint_names = [] + try: + arm_indices = self.robot_model.get_joint_group_indices("arms") + all_joint_names = [self.robot_model.joint_names[i] for i in arm_indices] + # Filter for right and left arm joints + self.right_arm_joint_names = [ + name for name in all_joint_names if name.startswith("right_") + ] + 
self.right_arm_indices = [ + self.robot_model.joint_to_dof_index[name] for name in self.right_arm_joint_names + ] + self.left_arm_joint_names = [ + name for name in all_joint_names if name.startswith("left_") + ] + self.left_arm_indices = [ + self.robot_model.joint_to_dof_index[name] for name in self.left_arm_joint_names + ] + # Hand joints + hand_indices = self.robot_model.get_joint_group_indices("hands") + all_hand_names = [self.robot_model.joint_names[i] for i in hand_indices] + self.right_hand_joint_names = [ + name for name in all_hand_names if name.startswith("right_") + ] + self.right_hand_indices = [ + self.robot_model.joint_to_dof_index[name] for name in self.right_hand_joint_names + ] + self.left_hand_joint_names = [ + name for name in all_hand_names if name.startswith("left_") + ] + self.left_hand_indices = [ + self.robot_model.joint_to_dof_index[name] for name in self.left_hand_joint_names + ] + except ValueError as e: + print(f"[JointSafetyMonitor] Warning: Could not initialize arm/hand visualization: {e}") + except Exception: + pass + + # Use single tensor_key for each plot + self.right_arm_pos_key = "right_arm_qpos" + self.left_arm_pos_key = "left_arm_qpos" + self.right_arm_vel_key = "right_arm_dq" + self.left_arm_vel_key = "left_arm_dq" + self.right_hand_pos_key = "right_hand_qpos" + self.left_hand_pos_key = "left_hand_qpos" + self.right_hand_vel_key = "right_hand_dq" + self.left_hand_vel_key = "left_hand_dq" + + # Define a consistent color palette for up to 8 joints (tab10 + extra) + self.joint_colors = [ + [31, 119, 180], # blue + [255, 127, 14], # orange + [44, 160, 44], # green + [214, 39, 40], # red + [148, 103, 189], # purple + [140, 86, 75], # brown + [227, 119, 194], # pink + [127, 127, 127], # gray (for 8th joint if needed) + ] + + # Initialize Rerun visualization only if enabled + self.viz = None + if self.enable_viz: + try: + self.viz = RerunViz( + image_keys=[], + tensor_keys=[ + self.right_arm_pos_key, + self.left_arm_pos_key, + 
self.right_arm_vel_key, + self.left_arm_vel_key, + self.right_hand_pos_key, + self.left_hand_pos_key, + self.right_hand_vel_key, + self.left_hand_vel_key, + ], + window_size=10.0, + app_name="joint_safety_monitor", + ) + except Exception: + self.viz = None + + def _initialize_limits(self): + """Initialize velocity and position limits for arm and hand joints using robot model joint groups.""" + if self.robot_model.supplemental_info is None: + raise ValueError("Robot model must have supplemental_info to use joint groups") + + # Get arm joint indices from robot model joint groups + try: + arm_indices = self.robot_model.get_joint_group_indices("arms") + arm_joint_names = [self.robot_model.joint_names[i] for i in arm_indices] + + for joint_name in arm_joint_names: + # Set velocity limits + vel_limit = self.ARM_VELOCITY_LIMIT * self.safety_margin + self.velocity_limits[joint_name] = {"min": -vel_limit, "max": vel_limit} + + # Set position limits from robot model + if joint_name in self.robot_model.joint_to_dof_index: + joint_idx = self.robot_model.joint_to_dof_index[joint_name] + # Adjust index for floating base if present + limit_idx = joint_idx - (7 if self.robot_model.is_floating_base_model else 0) + + if 0 <= limit_idx < len(self.robot_model.lower_joint_limits): + pos_min = self.robot_model.lower_joint_limits[limit_idx] + pos_max = self.robot_model.upper_joint_limits[limit_idx] + + # Apply safety margin to position limits + pos_range = pos_max - pos_min + margin = pos_range * (1.0 - self.safety_margin) / 2.0 + + self.position_limits[joint_name] = { + "min": pos_min + margin, + "max": pos_max - margin, + } + except ValueError as e: + print(f"[JointSafetyMonitor] Warning: Could not find 'arms' joint group: {e}") + + # Get hand joint indices from robot model joint groups + try: + hand_indices = self.robot_model.get_joint_group_indices("hands") + hand_joint_names = [self.robot_model.joint_names[i] for i in hand_indices] + + for joint_name in hand_joint_names: + # Set 
velocity limits only for hands (no position limits for now) + vel_limit = self.HAND_VELOCITY_LIMIT * self.safety_margin + self.velocity_limits[joint_name] = {"min": -vel_limit, "max": vel_limit} + except ValueError as e: + print(f"[JointSafetyMonitor] Warning: Could not find 'hands' joint group: {e}") + + def check_safety(self, obs: Dict, action: Dict) -> Tuple[bool, List[Dict]]: + """Check if current velocities and positions are within safe bounds. + + Args: + obs: Observation dictionary containing joint positions and velocities + action: Action dictionary containing target positions + + Returns: + (is_safe, violations): Tuple of safety status and list of violations + Note: is_safe=False only for velocity violations (triggers shutdown) + Position violations are warnings only (don't affect is_safe) + """ + self.violations = [] + is_safe = True + joint_names = self.robot_model.joint_names + + # Check current joint velocities (critical - triggers shutdown) + if "dq" in obs: + joint_velocities = obs["dq"] + + for i, joint_name in enumerate(joint_names): + # Only check monitored joints + if joint_name not in self.velocity_limits: + continue + + if i < len(joint_velocities): + velocity = joint_velocities[i] + limits = self.velocity_limits[joint_name] + + if velocity < limits["min"] or velocity > limits["max"]: + violation = { + "joint": joint_name, + "type": "velocity", + "value": velocity, + "limit_min": limits["min"], + "limit_max": limits["max"], + "exceeded_by": self._calculate_exceeded_percentage( + velocity, limits["min"], limits["max"] + ), + "critical": True, # Velocity violations are critical + } + self.violations.append(violation) + is_safe = False + + # Check current joint positions (warning only - no shutdown) + if "q" in obs: + joint_positions = obs["q"] + + for i, joint_name in enumerate(joint_names): + # Only check joints with position limits (arms) + if joint_name not in self.position_limits: + continue + + if i < len(joint_positions): + position = 
joint_positions[i]
+                    limits = self.position_limits[joint_name]
+
+                    if position < limits["min"] or position > limits["max"]:
+                        violation = {
+                            "joint": joint_name,
+                            "type": "position",
+                            "value": position,
+                            "limit_min": limits["min"],
+                            "limit_max": limits["max"],
+                            "exceeded_by": self._calculate_exceeded_percentage(
+                                position, limits["min"], limits["max"]
+                            ),
+                            "critical": False,  # Position violations are warnings only
+                        }
+                        self.violations.append(violation)
+                        # Don't set is_safe = False for position violations
+
+        return is_safe, self.violations
+
+    def _calculate_exceeded_percentage(
+        self, value: float, limit_min: float, limit_max: float
+    ) -> float:
+        """Calculate by how much percentage a value exceeds the limits.
+
+        Guards against a zero limit (e.g. a joint bounded at 0.0 rad), which
+        would otherwise divide by zero; in that case the absolute exceedance
+        (times 100) is reported instead of a percentage."""
+        if value < limit_min:
+            return abs((value - limit_min) / (abs(limit_min) or 1.0)) * 100
+        elif value > limit_max:
+            return abs((value - limit_max) / (abs(limit_max) or 1.0)) * 100
+        return 0.0
+
+    def get_safe_action(self, obs: Dict, original_action: Dict) -> Dict:
+        """Generate a safe action with startup ramping for smooth initialization. 
+ + Args: + obs: Observation dictionary containing current joint positions + original_action: The original action that may cause violations + + Returns: + Safe action with startup ramping applied if within ramp duration + """ + safe_action = original_action.copy() + + # Handle startup ramping for arm joints + if not self.startup_complete: + if self.initial_positions is None and "q" in obs: + # Store initial positions from first observation + self.initial_positions = obs["q"].copy() + + if ( + self.startup_counter < self.ramp_duration_steps + and self.initial_positions is not None + and "q" in safe_action + ): + # Ramp factor: 0.0 at start → 1.0 at end + ramp_factor = self.startup_counter / self.ramp_duration_steps + + # Apply ramping only to monitored arm joints + for joint_name in self.velocity_limits: # Only monitored arm joints + if joint_name in self.robot_model.joint_to_dof_index: + joint_idx = self.robot_model.joint_to_dof_index[joint_name] + if joint_idx < len(safe_action["q"]) and joint_idx < len( + self.initial_positions + ): + initial_pos = self.initial_positions[joint_idx] + target_pos = original_action["q"][joint_idx] + # Linear interpolation: initial + ramp_factor * (target - initial) + safe_action["q"][joint_idx] = initial_pos + ramp_factor * ( + target_pos - initial_pos + ) + + # Increment counter for next iteration + self.startup_counter += 1 + else: + # Ramping complete - use original actions + self.startup_complete = True + + return safe_action + + def get_violation_report(self, violations: Optional[List[Dict]] = None) -> str: + """Generate a formatted error report for violations. + + Args: + violations: List of violations to report (uses self.violations if None) + + Returns: + Formatted error message string + """ + if violations is None: + violations = self.violations + + if not violations: + return "No violations detected." 
+ + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + + # Check if these are critical violations or warnings + critical_violations = [v for v in violations if v.get("critical", True)] + warning_violations = [v for v in violations if not v.get("critical", True)] + + if critical_violations and warning_violations: + report = f"Joint safety bounds exceeded!\nTimestamp: {timestamp}\nViolations:\n" + elif critical_violations: + report = f"Joint safety bounds exceeded!\nTimestamp: {timestamp}\nViolations:\n" + else: + report = f"Joint position warnings!\nTimestamp: {timestamp}\nWarnings:\n" + + for violation in violations: + joint = violation["joint"] + vtype = violation["type"] + value = violation["value"] + exceeded = violation["exceeded_by"] + limit_min = violation["limit_min"] + limit_max = violation["limit_max"] + + if vtype == "velocity": + report += f" - {joint}: {vtype}={value:.3f} rad/s " + report += f"(limit: ±{limit_max:.3f} rad/s) - " + report += f"EXCEEDED by {exceeded:.1f}%\n" + elif vtype == "position": + report += f" - {joint}: {vtype}={value:.3f} rad " + report += f"(limits: [{limit_min:.3f}, {limit_max:.3f}] rad) - " + report += f"EXCEEDED by {exceeded:.1f}%\n" + + # Add appropriate action message + if critical_violations: + report += "Action: Safe mode engaged (kp=0, tau=0). System shutdown initiated.\n" + report += "Please restart Docker container to resume operation." + else: + report += "Action: Position warning only. Robot continues operation." + + return report + + def handle_violations(self, obs: Dict, action: Dict) -> Dict: + """Check safety and handle violations appropriately. 
+ + Args: + obs: Observation dictionary + action: Action dictionary + + Returns: + Dict with keys: + - 'safe_to_continue': bool - whether robot should continue operation + - 'action': Dict - potentially modified safe action + - 'shutdown_required': bool - whether system shutdown is needed + """ + is_safe, violations = self.check_safety(obs, action) + + # Apply startup ramping (always, regardless of violations) + safe_action = self.get_safe_action(obs, action) + + # Visualize arm and hand joint positions and velocities if enabled + if self.enable_viz: + if ( + self.right_arm_indices is not None + and self.left_arm_indices is not None + and self.right_hand_indices is not None + and self.left_hand_indices is not None + and "q" in obs + and "dq" in obs + and self.viz is not None + ): + try: + right_arm_positions = obs["q"][self.right_arm_indices] + left_arm_positions = obs["q"][self.left_arm_indices] + right_arm_velocities = obs["dq"][self.right_arm_indices] + left_arm_velocities = obs["dq"][self.left_arm_indices] + right_hand_positions = obs["q"][self.right_hand_indices] + left_hand_positions = obs["q"][self.left_hand_indices] + right_hand_velocities = obs["dq"][self.right_hand_indices] + left_hand_velocities = obs["dq"][self.left_hand_indices] + tensor_dict = { + self.right_arm_pos_key: right_arm_positions, + self.left_arm_pos_key: left_arm_positions, + self.right_arm_vel_key: right_arm_velocities, + self.left_arm_vel_key: left_arm_velocities, + self.right_hand_pos_key: right_hand_positions, + self.left_hand_pos_key: left_hand_positions, + self.right_hand_vel_key: right_hand_velocities, + self.left_hand_vel_key: left_hand_velocities, + } + self.viz.plot_tensors(tensor_dict, time.time()) + except Exception: + pass + + if not violations: + return {"safe_to_continue": True, "action": safe_action, "shutdown_required": False} + + # Separate critical (velocity) and warning (position) violations + critical_violations = [v for v in violations if v.get("critical", True)] + # 
warning_violations = [v for v in violations if not v.get('critical', True)]
+
+        # Print warnings for position violations
+        # if warning_violations:
+        #     warning_msg = self.get_violation_report(warning_violations)
+        #     print(f"[SAFETY WARNING] {warning_msg}")
+
+        # Handle critical violations (velocity) - trigger shutdown
+        if not is_safe and critical_violations:
+            error_msg = self.get_violation_report(critical_violations)
+            if self.env_type == "real":
+                print(f"[SAFETY VIOLATION] {error_msg}")
+                self.trigger_system_shutdown()
+
+            return {"safe_to_continue": False, "action": safe_action, "shutdown_required": True}
+
+        # Only position violations - continue with safe action
+        return {"safe_to_continue": True, "action": safe_action, "shutdown_required": False}
+
+    def trigger_system_shutdown(self):
+        """Trigger system shutdown after safety violation."""
+        print("\n[SAFETY] Initiating system shutdown due to safety violation...")
+        sys.exit(1)
+
+
+def main():
+    """Test the joint safety monitor with joint groups."""
+    print("Testing joint safety monitor with joint groups...")
+
+    try:
+        from decoupled_wbc.control.robot_model.instantiation.g1 import instantiate_g1_robot_model
+
+        # Instantiate robot model
+        robot_model = instantiate_g1_robot_model()
+        print(f"Robot model created with {len(robot_model.joint_names)} joints")
+
+        # Create safety monitor in sim mode so a violation is reported without sys.exit() aborting this test
+        safety_monitor = JointSafetyMonitor(robot_model, env_type="sim")
+        print("Safety monitor created successfully!")
+        print(f"Monitoring {len(safety_monitor.velocity_limits)} joints")
+
+        # Print monitored joints
+        print("\nVelocity limits:")
+        for joint_name, limits in safety_monitor.velocity_limits.items():
+            print(f"  - {joint_name}: ±{limits['max']:.2f} rad/s")
+
+        print(f"\nPosition limits (arms only): {len(safety_monitor.position_limits)} joints")
+        for joint_name, limits in safety_monitor.position_limits.items():
+            print(f"  - {joint_name}: [{limits['min']:.3f}, {limits['max']:.3f}] rad")
+
+        # Test safety checking with safe values
+        print("\n--- Testing Safety Checking ---")
+
+        # Create mock observation with safe values
+        safe_obs = {
+            "q": np.zeros(robot_model.num_dofs),  # All joints at zero position
+            "dq": np.zeros(robot_model.num_dofs),  # All joints at zero velocity
+        }
+        safe_action = {"q": np.zeros(robot_model.num_dofs)}
+
+        # Test handle_violations method
+        result = safety_monitor.handle_violations(safe_obs, safe_action)
+        print(
+            f"Safe values test: safe_to_continue={result['safe_to_continue']}, "
+            f"shutdown_required={result['shutdown_required']}"
+        )
+
+        # Test with unsafe velocity
+        unsafe_obs = safe_obs.copy()
+        unsafe_obs["dq"] = np.zeros(robot_model.num_dofs)
+        # Set left shoulder pitch velocity to exceed limit
+        left_shoulder_idx = robot_model.dof_index("left_shoulder_pitch_joint")
+        unsafe_obs["dq"][left_shoulder_idx] = 8.0  # Exceeds the 6.0 rad/s ARM_VELOCITY_LIMIT
+
+        print("\nUnsafe velocity test:")
+        result = safety_monitor.handle_violations(unsafe_obs, safe_action)
+        print(
+            f"  safe_to_continue={result['safe_to_continue']}, shutdown_required={result['shutdown_required']}"
+        )
+
+        # Test with unsafe position only
+        unsafe_pos_obs = safe_obs.copy()
+        unsafe_pos_obs["q"] = np.zeros(robot_model.num_dofs)
+        # Set left shoulder pitch position to exceed limit
+        unsafe_pos_obs["q"][left_shoulder_idx] = -4.0  # Exceeds lower limit of -3.089
+
+        print("\nUnsafe position test:")
+        result = safety_monitor.handle_violations(unsafe_pos_obs, safe_action)
+        print(
+            f"  safe_to_continue={result['safe_to_continue']}, shutdown_required={result['shutdown_required']}"
+        )
+
+        print("\nAll tests completed successfully!")
+
+    except Exception as e:
+        print(f"Test failed with error: {e}")
+        import traceback
+
+        traceback.print_exc()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/gr00t_wbc/control/envs/g1/utils/state_processor.py b/decoupled_wbc/control/envs/g1/utils/state_processor.py
similarity index 100%
rename from gr00t_wbc/control/envs/g1/utils/state_processor.py
rename to 
decoupled_wbc/control/envs/g1/utils/state_processor.py diff --git a/gr00t_wbc/control/envs/robocasa/__init__.py b/decoupled_wbc/control/envs/robocasa/__init__.py similarity index 100% rename from gr00t_wbc/control/envs/robocasa/__init__.py rename to decoupled_wbc/control/envs/robocasa/__init__.py diff --git a/decoupled_wbc/control/envs/robocasa/async_env_server.py b/decoupled_wbc/control/envs/robocasa/async_env_server.py new file mode 100644 index 0000000..bfd4a19 --- /dev/null +++ b/decoupled_wbc/control/envs/robocasa/async_env_server.py @@ -0,0 +1,305 @@ +from abc import abstractmethod +import threading +import time +from typing import Any, Dict, Tuple + +import mujoco +import numpy as np +import rclpy + +from decoupled_wbc.control.envs.g1.sim.image_publish_utils import ImagePublishProcess +from decoupled_wbc.control.envs.robocasa.utils.robocasa_env import ( + Gr00tLocomanipRoboCasaEnv, +) # noqa: F401 +from decoupled_wbc.control.robot_model.robot_model import RobotModel +from decoupled_wbc.control.utils.keyboard_dispatcher import KeyboardListenerSubscriber + + +class RoboCasaEnvServer: + """ + This class is responsible for running the simulation environment loop in a separate thread. + It communicates with the main thread via the `publish_obs` and `get_action` methods through `channel_bridge`. + It will also handle the viewer sync when `onscreen` is True. 
+ """ + + def __init__( + self, + env_name: str, + robot_name: str, + robot_model: RobotModel, + env_kwargs: Dict[str, Any], + **kwargs, + ): + # initialize environment + if env_kwargs.get("onscreen", False): + env_kwargs["onscreen"] = False + self.onscreen = True # onscreen render in the main thread + self.render_camera = env_kwargs.get("render_camera", None) + else: + self.onscreen = False + self.env_name = env_name + self.env = Gr00tLocomanipRoboCasaEnv(env_name, robot_name, robot_model, **env_kwargs) + self.init_caches() + self.cache_lock = threading.Lock() + + # initialize channel + self.init_channel() + + # initialize ROS2 node + if not rclpy.ok(): + rclpy.init() + self.node = rclpy.create_node("sim_robocasa") + self.thread = threading.Thread(target=rclpy.spin, args=(self.node,), daemon=True) + self.thread.start() + else: + self.thread = None + executor = rclpy.get_global_executor() + self.node = executor.get_nodes()[0] # will only take the first node + + self.control_freq = env_kwargs.get("control_freq", 1 / 0.02) + self.sim_freq = kwargs.get("sim_freq", 1 / 0.005) + self.control_rate = self.node.create_rate(self.control_freq) + + self.running = False + self.sim_thread = None + self.sync_lock = threading.Lock() + + self.sync_mode = kwargs.get("sync_mode", False) + self.steps_per_action = kwargs.get("steps_per_action", 1) + + self.image_dt = kwargs.get("image_dt", 0.04) + self.image_publish_process = None + self.viewer_freq = kwargs.get("viewer_freq", 1 / 0.02) + self.viewer = None + + self.verbose = kwargs.get("verbose", True) + + # Initialize keyboard listener for env reset + self.keyboard_listener = KeyboardListenerSubscriber() + + self.reset() + + @property + def base_env(self): + return self.env.env + + def start_image_publish_subprocess(self, start_method: str = "spawn", camera_port: int = 5555): + """Initialize image publishing subprocess if cameras are configured""" + if len(self.env.camera_names) == 0: + print( + "Warning: No camera configs provided, 
image publishing subprocess will not be started" + ) + return + + # Build camera configs from env camera settings + camera_configs = {} + for env_cam_name in self.env.camera_names: + camera_config = self.env.camera_key_mapper.get_camera_config(env_cam_name) + mapped_cam_name, cam_width, cam_height = camera_config + camera_configs[mapped_cam_name] = {"height": cam_height, "width": cam_width} + + self.image_publish_process = ImagePublishProcess( + camera_configs=camera_configs, + image_dt=self.image_dt, + zmq_port=camera_port, + start_method=start_method, + verbose=self.verbose, + ) + + self.image_publish_process.start_process() + + def update_render_caches(self, obs: Dict[str, Any]): + """Update render cache and shared memory for subprocess""" + if self.image_publish_process is None: + return + + # Extract image observations from obs dict + render_caches = { + k: v for k, v in obs.items() if k.endswith("_image") and isinstance(v, np.ndarray) + } + + # Update shared memory if image publishing process is available + if render_caches: + self.image_publish_process.update_shared_memory(render_caches) + + def init_caches(self): + self.caches = { + "obs": None, + "reward": None, + "terminated": None, + "truncated": None, + "info": None, + } + + def reset(self, **kwargs): + if self.viewer is not None: + self.viewer.close() + + obs, info = self.env.reset(**kwargs) + self.caches["obs"] = obs + self.caches["reward"] = 0 + self.caches["terminated"] = False + self.caches["truncated"] = False + self.caches["info"] = info + + # initialize viewer + if self.onscreen: + self.viewer = mujoco.viewer.launch_passive( + self.base_env.sim.model._model, + self.base_env.sim.data._data, + show_left_ui=False, + show_right_ui=False, + ) + self.viewer.opt.geomgroup[0] = 0 # disable collision visualization + if self.render_camera is not None: + self.viewer.cam.type = mujoco.mjtCamera.mjCAMERA_FIXED + self.viewer.cam.fixedcamid = self.base_env.sim.model._model.cam( + self.render_camera + ).id + + 
# self.episode_state.reset_state() + return obs, info + + @abstractmethod + def init_channel(self): + raise NotImplementedError("init_channel must be implemented by the subclass") + + @abstractmethod + def publish_obs(self): + raise NotImplementedError("publish_obs must be implemented by the subclass") + + @abstractmethod + def get_action(self) -> Tuple[Dict[str, Any], bool, bool]: + raise NotImplementedError("get_action must be implemented by the subclass") + + def start_as_thread(self): + """Start the simulation thread""" + if self.sim_thread is not None and self.sim_thread.is_alive(): + return + + self.sim_thread = threading.Thread(target=self.start) + self.sim_thread.daemon = True + self.sim_thread.start() + + def set_sync_mode(self, sync_mode: bool, steps_per_action: int = 4): + """Set the sync mode of the environment server""" + with self.sync_lock: + self.sync_mode = sync_mode + self.steps_per_action = steps_per_action + + def _check_keyboard_input(self): + """Check for keyboard input and handle state transitions""" + key = self.keyboard_listener.read_msg() + if key == "k": + print("\033[1;32m[Sim env]\033[0m Resetting sim environment") + self.reset() + + def start(self): + """Function executed by the simulation thread""" + iter_idx = 0 + steps_per_cur_action = 0 + t_start = time.monotonic() + + self.running = True + + while self.running: + # Check keyboard input for state transitions + self._check_keyboard_input() + + # Publish observations and get new action + self.publish_obs() + action, ready, is_new_action = self.get_action() + # ready is True if the action is received from the control loop + # is_new_action is True if the action is new (not the same as the previous action) + with self.sync_lock: + sync_mode = self.sync_mode + max_steps_per_action = self.steps_per_action + + # Process action if ready and within step limits + action_should_apply = ready and ( + (not sync_mode) or steps_per_cur_action < max_steps_per_action + ) + if action_should_apply: + 
obs, reward, terminated, truncated, info = self.env.step(action)
+                with self.cache_lock:
+                    self.caches["obs"] = obs
+                    self.caches["reward"] = reward
+                    self.caches["terminated"] = terminated
+                    self.caches["truncated"] = truncated
+                    self.caches["info"] = info
+
+                if reward == 1.0 and iter_idx % 50 == 0:
+                    print("\033[92mTask successful. Can save data now.\033[0m")
+
+                iter_idx += 1
+                steps_per_cur_action += 1
+                if self.verbose and sync_mode:
+                    print("steps_per_cur_action: ", steps_per_cur_action)
+
+            # Update render caches at image publishing rate
+            if action_should_apply and iter_idx % int(self.image_dt * self.control_freq) == 0:
+                with self.cache_lock:
+                    obs_copy = self.caches["obs"].copy()
+                self.update_render_caches(obs_copy)
+
+            # Reset step counter for new actions
+            if is_new_action:
+                steps_per_cur_action = 0
+
+            # Update viewer at specified frequency
+            if self.onscreen and iter_idx % (self.control_freq / self.viewer_freq) == 0:
+                self.viewer.sync()
+
+            # Check if we're meeting the desired control frequency
+            if iter_idx % 100 == 0:
+                end_time = time.monotonic()
+                if self.verbose:
+                    print(
+                        f"sim FPS: {100.0 / (end_time - t_start) * (self.sim_freq / self.control_freq)}"
+                    )
+                if (end_time - t_start) > ((110.0 / self.control_freq)):  # for tolerance
+                    print(
+                        f"Warning: Sim runs at "
+                        f"{100.0/(end_time - t_start) * (self.sim_freq / self.control_freq):.1f}Hz, "
+                        f"but should run at {self.sim_freq:.1f}Hz"
+                    )
+                t_start = end_time
+
+            # reset obj pos every 200 steps
+            if iter_idx % 200 == 0:
+                if hasattr(self.base_env, "reset_obj_pos"):
+                    self.base_env.reset_obj_pos()
+
+            self.control_rate.sleep()
+
+    def get_privileged_obs(self):
+        """Get privileged observation. 
Should be implemented by subclasses.""" + obs = {} + with self.cache_lock: + if hasattr(self.base_env, "get_privileged_obs_keys"): + for key in self.base_env.get_privileged_obs_keys(): + obs[key] = self.caches["obs"][key] + + for key in self.caches["obs"].keys(): + if key.endswith("_image"): + obs[key] = self.caches["obs"][key] + + return obs + + def stop(self): + """Stop the simulation thread""" + self.running = False + if self.sim_thread is not None: + self.sim_thread.join(timeout=1.0) # Wait for thread to finish with timeout + self.sim_thread = None + + def close(self): + self.stop() + if self.image_publish_process is not None: + self.image_publish_process.stop() + if self.onscreen: + self.viewer.close() + self.env.close() + + def get_reward(self): + return self.base_env.reward() diff --git a/decoupled_wbc/control/envs/robocasa/sync_env.py b/decoupled_wbc/control/envs/robocasa/sync_env.py new file mode 100644 index 0000000..31cd465 --- /dev/null +++ b/decoupled_wbc/control/envs/robocasa/sync_env.py @@ -0,0 +1,586 @@ +import sys +from typing import Any, Dict, Tuple + +import gymnasium as gym +from gymnasium.envs.registration import register +import numpy as np +from robocasa.environments.locomanipulation import REGISTERED_LOCOMANIPULATION_ENVS +from robocasa.models.robots import GR00T_LOCOMANIP_ENVS_ROBOTS +from robosuite.environments.robot_env import RobotEnv +from scipy.spatial.transform import Rotation as R + +from decoupled_wbc.control.envs.g1.utils.joint_safety import JointSafetyMonitor +from decoupled_wbc.control.envs.robocasa.utils.controller_utils import ( + update_robosuite_controller_configs, +) +from decoupled_wbc.control.envs.robocasa.utils.robocasa_env import ( # noqa: F401 + ALLOWED_LANGUAGE_CHARSET, + Gr00tLocomanipRoboCasaEnv, +) +from decoupled_wbc.control.robot_model.instantiation import get_robot_type_and_model +from decoupled_wbc.control.utils.n1_utils import ( + prepare_gym_space_for_eval, + prepare_observation_for_eval, +) +from 
decoupled_wbc.data.constants import RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH + + +class SyncEnv(gym.Env): + MAX_MUJOCO_STATE_LEN = 800 + + def __init__(self, env_name, robot_name, **kwargs): + self.env_name = env_name + self.robot_name = robot_name + self.onscreen = kwargs.get("onscreen", True) + self.enable_gravity_compensation = kwargs.pop("enable_gravity_compensation", False) + self.gravity_compensation_joints = kwargs.pop("gravity_compensation_joints", ["arms"]) + _, self.robot_model = get_robot_type_and_model( + robot_name, enable_waist_ik=kwargs.pop("enable_waist", False) + ) + + env_kwargs = { + "onscreen": kwargs.get("onscreen", True), + "offscreen": kwargs.get("offscreen", False), + "renderer": kwargs.get("renderer", "mjviewer"), + "render_camera": kwargs.get("render_camera", "frontview"), + "camera_names": kwargs.get("camera_names", ["frontview"]), + "camera_heights": kwargs.get("camera_heights", None), + "camera_widths": kwargs.get("camera_widths", None), + "controller_configs": kwargs["controller_configs"], + "control_freq": kwargs.get("control_freq", 50), + "translucent_robot": kwargs.get("translucent_robot", True), + "ik_indicator": kwargs.get("ik_indicator", False), + "randomize_cameras": kwargs.get("randomize_cameras", True), + } + self.env = Gr00tLocomanipRoboCasaEnv( + env_name, robot_name, robot_model=self.robot_model, **env_kwargs + ) + self.init_cache() + + self.reset() + + @property + def base_env(self) -> RobotEnv: + return self.env.env + + def overwrite_floating_base_action(self, navigate_cmd): + if self.base_env.robots[0].robot_model.default_base == "FloatingLeggedBase": + self.env.unwrapped.overridden_floating_base_action = navigate_cmd + + def get_mujoco_state_info(self): + mujoco_state = self.base_env.sim.get_state().flatten() + assert len(mujoco_state) < SyncEnv.MAX_MUJOCO_STATE_LEN + padding_width = SyncEnv.MAX_MUJOCO_STATE_LEN - len(mujoco_state) + padded_mujoco_state = np.pad( + mujoco_state, (0, padding_width), mode="constant", 
constant_values=0 + ) + max_mujoco_state_len = SyncEnv.MAX_MUJOCO_STATE_LEN + mujoco_state_len = len(mujoco_state) + mujoco_state = padded_mujoco_state.copy() + return max_mujoco_state_len, mujoco_state_len, mujoco_state + + def reset_to(self, state: Dict[str, Any]) -> Dict[str, Any] | None: + if hasattr(self.base_env, "reset_to"): + self.base_env.reset_to(state) + else: + # todo: maybe update robosuite to have reset_to() + env = self.base_env + if "model_file" in state: + xml = env.edit_model_xml(state["model_file"]) + env.reset_from_xml_string(xml) + env.sim.reset() + if "states" in state: + env.sim.set_state_from_flattened(state["states"]) + env.sim.forward() + + obs = self.env.force_update_observation(timestep=0) + self.cache["obs"] = obs + return + + def get_state(self) -> Dict[str, Any]: + return self.base_env.get_state() + + def is_success(self): + """ + Check if the task condition(s) is reached. Should return a dictionary + { str: bool } with at least a "task" key for the overall task success, + and additional optional keys corresponding to other task criteria. 
+ """ + # First, try to use the base environment's is_success method if it exists + if hasattr(self.base_env, "is_success"): + return self.base_env.is_success() + + # Fall back to using _check_success if available + elif hasattr(self.base_env, "_check_success"): + succ = self.base_env._check_success() + if isinstance(succ, dict): + assert "task" in succ + return succ + return {"task": succ} + + # If neither method exists, return failure + else: + return {"task": False} + + def init_cache(self): + self.cache = { + "obs": None, + "reward": None, + "terminated": None, + "truncated": None, + "info": None, + } + + def reset(self, seed=None, options=None) -> Tuple[Dict[str, any], Dict[str, any]]: + self.init_cache() + obs, info = self.env.reset(seed=seed, options=options) + self.cache["obs"] = obs + self.cache["reward"] = 0 + self.cache["terminated"] = False + self.cache["truncated"] = False + self.cache["info"] = info + return self.observe(), info + + def observe(self) -> Dict[str, any]: + # Get observations from body and hands + assert ( + self.cache["obs"] is not None + ), "Observation cache is not initialized, please reset the environment first" + raw_obs = self.cache["obs"] + + # Body and hand joint measurements come in actuator order, so we need to convert them to joint order + whole_q = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=raw_obs["body_q"], + left_hand_actuated_joint_values=raw_obs["left_hand_q"], + right_hand_actuated_joint_values=raw_obs["right_hand_q"], + ) + whole_dq = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=raw_obs["body_dq"], + left_hand_actuated_joint_values=raw_obs["left_hand_dq"], + right_hand_actuated_joint_values=raw_obs["right_hand_dq"], + ) + whole_ddq = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=raw_obs["body_ddq"], + left_hand_actuated_joint_values=raw_obs["left_hand_ddq"], + 
right_hand_actuated_joint_values=raw_obs["right_hand_ddq"], + ) + whole_tau_est = self.robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=raw_obs["body_tau_est"], + left_hand_actuated_joint_values=raw_obs["left_hand_tau_est"], + right_hand_actuated_joint_values=raw_obs["right_hand_tau_est"], + ) + eef_obs = self.get_eef_obs(whole_q) + + obs = { + "q": whole_q, + "dq": whole_dq, + "ddq": whole_ddq, + "tau_est": whole_tau_est, + "floating_base_pose": raw_obs["floating_base_pose"], + "floating_base_vel": raw_obs["floating_base_vel"], + "floating_base_acc": raw_obs["floating_base_acc"], + "wrist_pose": np.concatenate([eef_obs["left_wrist_pose"], eef_obs["right_wrist_pose"]]), + } + + # Add state keys for model input + obs = prepare_observation_for_eval(self.robot_model, obs) + + obs["annotation.human.task_description"] = raw_obs["language.language_instruction"] + + if hasattr(self.base_env, "get_privileged_obs_keys"): + for key in self.base_env.get_privileged_obs_keys(): + obs[key] = raw_obs[key] + + for key in raw_obs.keys(): + if key.endswith("_image"): + obs[key] = raw_obs[key] + # TODO: add video.key without _image suffix for evaluation, convert to uint8, remove later + obs[f"video.{key.replace('_image', '')}"] = raw_obs[key] + return obs + + def step( + self, action: Dict[str, any] + ) -> Tuple[Dict[str, any], float, bool, bool, Dict[str, any]]: + self.queue_action(action) + return self.get_step_info() + + def get_observation(self): + return self.base_env._get_observations() # assumes base env is robosuite + + def get_step_info(self) -> Tuple[Dict[str, any], float, bool, bool, Dict[str, any]]: + return ( + self.observe(), + self.cache["reward"], + self.cache["terminated"], + self.cache["truncated"], + self.cache["info"], + ) + + def convert_q_to_actuated_joint_order(self, q: np.ndarray) -> np.ndarray: + body_q = self.robot_model.get_body_actuated_joints(q) + left_hand_q = self.robot_model.get_hand_actuated_joints(q, side="left") + 
        right_hand_q = self.robot_model.get_hand_actuated_joints(q, side="right")
        whole_q = np.zeros_like(q)
        whole_q[self.robot_model.get_joint_group_indices("body")] = body_q
        whole_q[self.robot_model.get_joint_group_indices("left_hand")] = left_hand_q
        whole_q[self.robot_model.get_joint_group_indices("right_hand")] = right_hand_q

        return whole_q

    def set_ik_indicator(self, teleop_cmd):
        """Set the IK indicators for the simulator"""
        # Only update when both wrist targets are present in the teleop command.
        if "left_wrist" in teleop_cmd and "right_wrist" in teleop_cmd:
            left_wrist_input_pose = teleop_cmd["left_wrist"]
            right_wrist_input_pose = teleop_cmd["right_wrist"]
            # assumes base_env is (or forwards to) an IKWrapper — TODO confirm
            ik_wrapper = self.base_env
            ik_wrapper.set_target_poses_outside_env([left_wrist_input_pose, right_wrist_input_pose])

    def render(self):
        """Update the viewer (if any) and render on-screen when enabled."""
        if self.base_env.viewer is not None:
            self.base_env.viewer.update()
        if self.onscreen:
            self.base_env.render()

    def queue_action(self, action: Dict[str, Any]):
        """Convert the action to actuator order, optionally add gravity
        compensation torques, step the env, and refresh the step cache."""
        # action is in pinocchio joint order, we need to convert it to actuator order
        action_q = self.convert_q_to_actuated_joint_order(action["q"])

        # Compute gravity compensation torques if enabled
        tau_q = np.zeros_like(action_q)
        if self.enable_gravity_compensation and self.robot_model is not None:
            try:
                # Get current robot configuration from cache (more efficient than observe())
                raw_obs = self.cache["obs"]

                # Convert from actuated joint order to joint order for Pinocchio
                current_q_joint_order = self.robot_model.get_configuration_from_actuated_joints(
                    body_actuated_joint_values=raw_obs["body_q"],
                    left_hand_actuated_joint_values=raw_obs["left_hand_q"],
                    right_hand_actuated_joint_values=raw_obs["right_hand_q"],
                )

                # Compute gravity compensation in joint order using current robot configuration
                gravity_torques_joint_order = self.robot_model.compute_gravity_compensation_torques(
                    current_q_joint_order, joint_groups=self.gravity_compensation_joints
                )

                # Convert gravity torques to actuated joint order
                gravity_torques_actuated = self.convert_q_to_actuated_joint_order(
                    gravity_torques_joint_order
                )

                # Add gravity compensation to torques
                tau_q += gravity_torques_actuated

            except Exception as e:
                # Deliberate best-effort: a failed compensation must not abort the step.
                print(f"Error applying gravity compensation in sync_env: {e}")

        obs, reward, terminated, truncated, info = self.env.step({"q": action_q, "tau": tau_q})
        self.cache["obs"] = obs
        self.cache["reward"] = reward
        self.cache["terminated"] = terminated
        self.cache["truncated"] = truncated
        self.cache["info"] = info

    def queue_state(self, state: Dict[str, Any]):
        # This function is for debugging or cross-playback between sim and real only.
        # It teleports joints kinematically instead of stepping the dynamics.
        state_q = self.convert_q_to_actuated_joint_order(state["q"])
        obs, reward, terminated, truncated, info = self.env.unwrapped.step_only_kinematics(
            {"q": state_q}
        )
        self.cache["obs"] = obs
        self.cache["reward"] = reward
        self.cache["terminated"] = terminated
        self.cache["truncated"] = truncated
        self.cache["info"] = info

    @property
    def observation_space(self) -> gym.Space:
        # @todo: check if the low and high bounds are correct for body_obs.
        # Unbounded joint-level spaces sized by the robot model's DOF count.
        q_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.robot_model.num_dofs,))
        dq_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.robot_model.num_dofs,))
        ddq_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.robot_model.num_dofs,))
        tau_est_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.robot_model.num_dofs,))
        floating_base_pose_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7,))
        floating_base_vel_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(6,))
        floating_base_acc_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(6,))
        # Two stacked 7-DOF wrist poses (left then right).
        wrist_pose_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(7 + 7,))

        obs_space = gym.spaces.Dict(
            {
                "floating_base_pose": floating_base_pose_space,
                "floating_base_vel": floating_base_vel_space,
                "floating_base_acc": floating_base_acc_space,
                "q": q_space,
                "dq": dq_space,
                "ddq": ddq_space,
                "tau_est": tau_est_space,
                "wrist_pose": wrist_pose_space,
            }
        )

        # Mirror the key additions performed by prepare_observation_for_eval().
        obs_space = prepare_gym_space_for_eval(self.robot_model, obs_space)

        obs_space["annotation.human.task_description"] = gym.spaces.Text(
            max_length=256, charset=ALLOWED_LANGUAGE_CHARSET
        )

        if hasattr(self.base_env, "get_privileged_obs_keys"):
            for key, shape in self.base_env.get_privileged_obs_keys().items():
                space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=shape)
                obs_space[key] = space

        robocasa_obs_space = self.env.observation_space
        for key in robocasa_obs_space.keys():
            if key.endswith("_image"):
                space = gym.spaces.Box(
                    low=-np.inf, high=np.inf, shape=robocasa_obs_space[key].shape
                )
                obs_space[key] = space
                # TODO: add video.key without _image suffix for evaluation, remove later
                space_uint = gym.spaces.Box(low=0, high=255, shape=space.shape, dtype=np.uint8)
                obs_space[f"video.{key.replace('_image', '')}"] = space_uint

        return obs_space

    def reset_obj_pos(self):
        # For Tairan's goal-reaching task, a hacky way to reset the object position is needed.
        if hasattr(self.base_env, "reset_obj_pos"):
            self.base_env.reset_obj_pos()

    @property
    def action_space(self) -> gym.Space:
        # Forwarded unchanged from the wrapped env.
        return self.env.action_space

    def close(self):
        """Release the wrapped environment's resources."""
        self.env.close()

    def __repr__(self):
        return (
            f"SyncEnv(env_name={self.env_name}, \n"
            f" observation_space={self.observation_space}, \n"
            f" action_space={self.action_space})"
        )

    def get_joint_gains(self):
        """Collect per-part kp gains (plus gripper actuator gains) into a
        joint-indexed vector of length num_dofs."""
        controller = self.base_env.robots[0].composite_controller

        gains = {}
        # Map robosuite part-controller names onto robot-model joint groups.
        key_mapping = {
            "left": "left_arm",
            "right": "right_arm",
            "legs": "legs",
            "torso": "waist",
            "head": "neck",
        }
        for k in controller.part_controllers.keys():
            if hasattr(controller.part_controllers[k], "kp"):
                if k in key_mapping:
                    gains[key_mapping[k]] = controller.part_controllers[k].kp
                else:
                    gains[k] = controller.part_controllers[k].kp
        gains.update(
            {
                # Gripper stiffness comes from the mujoco actuator gain params.
                "left_hand": self.base_env.sim.model.actuator_gainprm[
                    self.base_env.robots[0]._ref_actuators_indexes_dict["left_gripper"], 0
                ],
                "right_hand": self.base_env.sim.model.actuator_gainprm[
                    self.base_env.robots[0]._ref_actuators_indexes_dict["right_gripper"], 0
                ],
            }
        )
        joint_gains = np.zeros(self.robot_model.num_dofs)
        for k in gains.keys():
            joint_gains[self.robot_model.get_joint_group_indices(k)] = gains[k]
        return joint_gains

    def get_joint_damping(self):
        """Collect per-part kd damping (plus gripper actuator damping) into a
        joint-indexed vector of length num_dofs."""
        controller = self.base_env.robots[0].composite_controller
        damping = {}
        key_mapping = {
            "left": "left_arm",
            "right": "right_arm",
            "legs": "legs",
            "torso": "waist",
            "head": "neck",
        }
        for k in controller.part_controllers.keys():
            if hasattr(controller.part_controllers[k], "kd"):
                if k in key_mapping:
                    damping[key_mapping[k]] = controller.part_controllers[k].kd
                else:
                    damping[k] = controller.part_controllers[k].kd
        damping.update(
            {
                # biasprm[..., 2] is the (negative) velocity-feedback term, so negate it.
                "left_hand": -self.base_env.sim.model.actuator_biasprm[
                    self.base_env.robots[0]._ref_actuators_indexes_dict["left_gripper"], 2
                ],
                "right_hand":
                -self.base_env.sim.model.actuator_biasprm[
                    self.base_env.robots[0]._ref_actuators_indexes_dict["right_gripper"], 2
                ],
            }
        )
        joint_damping = np.zeros(self.robot_model.num_dofs)
        for k in damping.keys():
            joint_damping[self.robot_model.get_joint_group_indices(k)] = damping[k]
        return joint_damping

    def get_eef_obs(self, q: np.ndarray) -> Dict[str, np.ndarray]:
        """Compute left/right wrist poses (position + wxyz quaternion) via FK."""
        self.robot_model.cache_forward_kinematics(q)
        eef_obs = {}
        for side in ["left", "right"]:
            wrist_placement = self.robot_model.frame_placement(
                self.robot_model.supplemental_info.hand_frame_names[side]
            )
            # scalar_first=True -> quaternion is (w, x, y, z).
            wrist_pos, wrist_quat = wrist_placement.translation[:3], R.from_matrix(
                wrist_placement.rotation
            ).as_quat(scalar_first=True)
            eef_obs[f"{side}_wrist_pose"] = np.concatenate([wrist_pos, wrist_quat])

        return eef_obs


class G1SyncEnv(SyncEnv):
    """SyncEnv specialization for the G1 robot: OAK camera defaults plus a
    joint-safety monitor wrapped around queue_action."""

    def __init__(
        self,
        env_name,
        robot_name,
        **kwargs,
    ):
        # Choose render cameras based on the requested renderer backend.
        renderer = kwargs.get("renderer", "mjviewer")
        if renderer == "mjviewer":
            default_render_camera = ["robot0_oak_egoview"]
        elif renderer in ["mujoco", "rerun"]:
            default_render_camera = [
                "robot0_oak_egoview",
                "robot0_oak_left_monoview",
                "robot0_oak_right_monoview",
            ]
        else:
            raise NotImplementedError
        default_camera_names = [
            "robot0_oak_egoview",
            "robot0_oak_left_monoview",
            "robot0_oak_right_monoview",
        ]
        default_camera_heights = [
            RS_VIEW_CAMERA_HEIGHT,
            RS_VIEW_CAMERA_HEIGHT,
            RS_VIEW_CAMERA_HEIGHT,
        ]
        default_camera_widths = [
            RS_VIEW_CAMERA_WIDTH,
            RS_VIEW_CAMERA_WIDTH,
            RS_VIEW_CAMERA_WIDTH,
        ]

        # Fill in G1 defaults while letting explicit caller kwargs win.
        kwargs.update(
            {
                "onscreen": kwargs.get("onscreen", True),
                "offscreen": kwargs.get("offscreen", False),
                "render_camera": kwargs.get("render_camera", default_render_camera),
                "camera_names": kwargs.get("camera_names", default_camera_names),
                "camera_heights": kwargs.get("camera_heights", default_camera_heights),
                "camera_widths": kwargs.get("camera_widths", default_camera_widths),
                "translucent_robot": kwargs.get("translucent_robot", False),
            }
        )
        super().__init__(env_name=env_name, robot_name=robot_name, **kwargs)

        # Initialize safety monitor (visualization disabled) - G1 specific
        self.safety_monitor = JointSafetyMonitor(
            self.robot_model,
            enable_viz=False,
            env_type="sim",  # G1SyncEnv is always simulation
        )
        self.safety_monitor.ramp_duration_steps = 0
        self.safety_monitor.startup_complete = True
        self.safety_monitor.LOWER_BODY_VELOCITY_LIMIT = (
            1e10  # disable lower body velocity limits since it impacts WBC
        )
        self.last_safety_ok = True  # Track last safety status from queue_action

    @property
    def observation_space(self):
        # Extend the base space with the secondary (torso) IMU signals.
        obs_space = super().observation_space
        obs_space["torso_quat"] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(4,))
        obs_space["torso_ang_vel"] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(3,))
        return obs_space

    def observe(self):
        obs = super().observe()
        obs["torso_quat"] = self.cache["obs"]["secondary_imu_quat"]
        # secondary_imu_vel is 6-D; the angular part is the last three entries.
        obs["torso_ang_vel"] = self.cache["obs"]["secondary_imu_vel"][3:6]
        return obs

    def queue_action(self, action: Dict[str, Any]):
        """Run the safety monitor over the proposed action, then queue the
        (possibly clamped) action via the parent implementation."""
        # Safety check before queuing action
        obs = self.observe()
        safety_result = self.safety_monitor.handle_violations(obs, action)
        action = safety_result["action"]
        # Save safety status for efficient access
        self.last_safety_ok = not safety_result.get("shutdown_required", False)
        # Check if shutdown is required
        if safety_result["shutdown_required"]:
            self.safety_monitor.trigger_system_shutdown()

        # Call parent queue_action with potentially modified action
        super().queue_action(action)

    def get_joint_safety_status(self) -> bool:
        """Get current joint safety status from the last queue_action safety check.

        Returns:
            bool: True if joints are safe (no shutdown required), False if unsafe
        """
        return self.last_safety_ok


def create_gym_sync_env_class(env, robot, robot_alias, wbc_version):
    """Dynamically create and register a gymnasium env class for one
    (env, robot, wbc_version) combination."""
    class_name = f"{env}_{robot}_{wbc_version}"
    id_name = f"gr00tlocomanip_{robot_alias}/{class_name}"

    # Pick the robot-specific SyncEnv subclass when one exists.
    if robot_alias.startswith("g1"):
        env_class_type = G1SyncEnv
    elif robot_alias.startswith("gr1"):
        env_class_type = globals().get("GR1SyncEnv", SyncEnv)
    else:
        env_class_type = SyncEnv

    controller_configs = update_robosuite_controller_configs(
        robot=robot,
        wbc_version=wbc_version,
    )

    # NOTE(review): super(self.__class__, self) recurses infinitely if this
    # generated class is ever subclassed; binding the parent class explicitly
    # would be safer — confirm these classes are never subclassed.
    env_class_type = type(
        class_name,
        (env_class_type,),
        {
            "__init__": lambda self, **kwargs: super(self.__class__, self).__init__(
                env_name=env,
                robot_name=robot,
                controller_configs=controller_configs,
                **kwargs,
            )
        },
    )

    # Expose the generated class on this module so the gym entry_point resolves.
    current_module = sys.modules["decoupled_wbc.control.envs.robocasa.sync_env"]
    setattr(current_module, class_name, env_class_type)
    register(
        id=id_name,  # Unique ID for the environment
        entry_point=f"decoupled_wbc.control.envs.robocasa.sync_env:{class_name}",
    )


WBC_VERSION = "gear_wbc"

# Register one gym env per (task, robot) combination at import time.
for ENV in REGISTERED_LOCOMANIPULATION_ENVS:
    for ROBOT, ROBOT_ALIAS in GR00T_LOCOMANIP_ENVS_ROBOTS.items():
        create_gym_sync_env_class(ENV, ROBOT, ROBOT_ALIAS, WBC_VERSION)


if __name__ == "__main__":
    # Smoke test: instantiate one registered env and print its space.
    env = gym.make("gr00tlocomanip_g1_sim/PnPBottle_g1_gear_wbc")
    print(env.observation_space)
+1,73 @@ +from dataclasses import dataclass +from typing import Dict, Optional, Tuple + +from decoupled_wbc.data.constants import RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH + + +@dataclass +class CameraConfig: + width: int + height: int + mapped_key: str + + +class CameraKeyMapper: + def __init__(self): + # Default camera dimensions + self.default_width = RS_VIEW_CAMERA_WIDTH + self.default_height = RS_VIEW_CAMERA_HEIGHT + + # Camera key mapping with custom dimensions + self.camera_configs: Dict[str, CameraConfig] = { + # GR1 + "egoview": CameraConfig(self.default_width, self.default_height, "ego_view"), + "frontview": CameraConfig(self.default_width, self.default_height, "front_view"), + # G1 + "robot0_rs_egoview": CameraConfig(self.default_width, self.default_height, "ego_view"), + "robot0_rs_tppview": CameraConfig(self.default_width, self.default_height, "tpp_view"), + "robot0_oak_egoview": CameraConfig(self.default_width, self.default_height, "ego_view"), + "robot0_oak_left_monoview": CameraConfig( + self.default_width, self.default_height, "ego_view_left_mono" + ), + "robot0_oak_right_monoview": CameraConfig( + self.default_width, self.default_height, "ego_view_right_mono" + ), + } + + def get_camera_config(self, key: str) -> Optional[Tuple[str, int, int]]: + """ + Get the mapped camera key and dimensions for a given camera key. + + Args: + key: The input camera key + + Returns: + Tuple of (mapped_key, width, height) if key exists, None otherwise + """ + config = self.camera_configs.get(key.lower()) + if config is None: + return None + return config.mapped_key, config.width, config.height + + def add_camera_config( + self, key: str, mapped_key: str, width: int = 256, height: int = 256 + ) -> None: + """ + Add a new camera configuration or update an existing one. 
+ + Args: + key: The camera key to add/update + mapped_key: The actual camera key to map to + width: Camera width in pixels + height: Camera height in pixels + """ + self.camera_configs[key.lower()] = CameraConfig(width, height, mapped_key) + + def get_all_camera_keys(self) -> list: + """ + Get all available camera keys. + + Returns: + List of all camera keys + """ + return list(self.camera_configs.keys()) diff --git a/gr00t_wbc/control/envs/robocasa/utils/controller_utils.py b/decoupled_wbc/control/envs/robocasa/utils/controller_utils.py similarity index 100% rename from gr00t_wbc/control/envs/robocasa/utils/controller_utils.py rename to decoupled_wbc/control/envs/robocasa/utils/controller_utils.py diff --git a/decoupled_wbc/control/envs/robocasa/utils/robocasa_env.py b/decoupled_wbc/control/envs/robocasa/utils/robocasa_env.py new file mode 100644 index 0000000..9a0f351 --- /dev/null +++ b/decoupled_wbc/control/envs/robocasa/utils/robocasa_env.py @@ -0,0 +1,443 @@ +import os +from typing import Any, Dict, List, Tuple + +from gymnasium import spaces +import mujoco +import numpy as np +import robocasa +from robocasa.utils.gym_utils.gymnasium_basic import ( + RoboCasaEnv, + create_env_robosuite, +) +from robocasa.wrappers.ik_wrapper import IKWrapper +from robosuite.controllers import load_composite_controller_config +from robosuite.utils.log_utils import ROBOSUITE_DEFAULT_LOGGER + +from decoupled_wbc.control.envs.robocasa.utils.cam_key_converter import CameraKeyMapper +from decoupled_wbc.control.envs.robocasa.utils.robot_key_converter import Gr00tObsActionConverter +from decoupled_wbc.control.robot_model.robot_model import RobotModel + +ALLOWED_LANGUAGE_CHARSET = ( + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ,.\n\t[]{}()!?'_:" +) + + +class Gr00tLocomanipRoboCasaEnv(RoboCasaEnv): + def __init__( + self, + env_name: str, + robots_name: str, + robot_model: RobotModel, # gr00t robot model + input_space: str = "JOINT_SPACE", # either "JOINT_SPACE" 
or "EEF_SPACE" + camera_names: List[str] = ["egoview"], + camera_heights: List[int] | None = None, + camera_widths: List[int] | None = None, + onscreen: bool = False, + offscreen: bool = False, + dump_rollout_dataset_dir: str | None = None, + rollout_hdf5: str | None = None, + rollout_trainset: int | None = None, + controller_configs: str | None = None, + ik_indicator: bool = False, + **kwargs, + ): + # ========= Create env ========= + if controller_configs is None: + if "G1" in robots_name: + controller_configs = ( + "robocasa/examples/third_party_controller/default_mink_ik_g1_wbc.json" + ) + elif "GR1" in robots_name: + controller_configs = ( + "robocasa/examples/third_party_controller/default_mink_ik_gr1_smallkd.json" + ) + else: + assert False, f"Unsupported robot name: {robots_name}" + controller_configs = os.path.join( + os.path.dirname(robocasa.__file__), + "../", + controller_configs, + ) + controller_configs = load_composite_controller_config( + controller=controller_configs, + robot=robots_name.split("_")[0], + ) + if input_space == "JOINT_SPACE": + controller_configs["type"] = "BASIC" + controller_configs["composite_controller_specific_configs"] = {} + controller_configs["control_delta"] = False + + self.camera_key_mapper = CameraKeyMapper() + self.camera_names = camera_names + + if camera_widths is None: + self.camera_widths = [ + self.camera_key_mapper.get_camera_config(name)[1] for name in camera_names + ] + else: + self.camera_widths = camera_widths + if camera_heights is None: + self.camera_heights = [ + self.camera_key_mapper.get_camera_config(name)[2] for name in camera_names + ] + else: + self.camera_heights = camera_heights + + self.env, self.env_kwargs = create_env_robosuite( + env_name=env_name, + robots=robots_name.split("_"), + controller_configs=controller_configs, + camera_names=camera_names, + camera_widths=self.camera_widths, + camera_heights=self.camera_heights, + enable_render=offscreen, + onscreen=onscreen, + **kwargs, # Forward 
kwargs to create_env_robosuite + ) + + if ik_indicator: + self.env = IKWrapper(self.env, ik_indicator=True) + + # ========= create converters first to get total DOFs ========= + # For now, assume single robot (multi-robot support can be added later) + self.obs_action_converter: List[Gr00tObsActionConverter] = [ + Gr00tObsActionConverter( + robot_model=robot_model, + robosuite_robot_model=self.env.robots[i], + ) + for i in range(len(self.env.robots)) + ] + + self.body_dofs = sum(converter.body_dof for converter in self.obs_action_converter) + self.gripper_dofs = sum(converter.gripper_dof for converter in self.obs_action_converter) + self.total_dofs = self.body_dofs + self.gripper_dofs + self.body_nu = sum(converter.body_nu for converter in self.obs_action_converter) + self.gripper_nu = sum(converter.gripper_nu for converter in self.obs_action_converter) + self.total_nu = self.body_nu + self.gripper_nu + + # ========= create spaces to match total DOFs ========= + self.get_observation_space() + self.get_action_space() + + self.enable_render = offscreen + self.render_obs_key = f"{camera_names[0]}_image" + self.render_cache = None + + self.dump_rollout_dataset_dir = dump_rollout_dataset_dir + self.gr00t_exporter = None + self.np_exporter = None + + self.rollout_hdf5 = rollout_hdf5 + self.rollout_trainset = rollout_trainset + self.rollout_initial_state = {} + + self.verbose = False + for k, v in self.observation_space.items(): + self.verbose and print("{OBS}", k, v) + for k, v in self.action_space.items(): + self.verbose and print("{ACTION}", k, v) + + self.overridden_floating_base_action = None + + def get_observation_space(self): + self.observation_space = spaces.Dict({}) + + # Add all the observation spaces + self.observation_space["time"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(1,), dtype=np.float32 + ) + self.observation_space["floating_base_pose"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(7,), dtype=np.float32 + ) + 
self.observation_space["floating_base_vel"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(6,), dtype=np.float32 + ) + self.observation_space["floating_base_acc"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(6,), dtype=np.float32 + ) + self.observation_space["body_q"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.body_dofs,), dtype=np.float32 + ) + self.observation_space["body_dq"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.body_dofs,), dtype=np.float32 + ) + self.observation_space["body_ddq"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.body_dofs,), dtype=np.float32 + ) + self.observation_space["body_tau_est"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.body_nu,), dtype=np.float32 + ) + self.observation_space["left_hand_q"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.gripper_dofs // 2,), dtype=np.float32 + ) + self.observation_space["left_hand_dq"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.gripper_dofs // 2,), dtype=np.float32 + ) + self.observation_space["left_hand_ddq"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.gripper_dofs // 2,), dtype=np.float32 + ) + self.observation_space["left_hand_tau_est"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.gripper_nu // 2,), dtype=np.float32 + ) + self.observation_space["right_hand_q"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.gripper_dofs // 2,), dtype=np.float32 + ) + self.observation_space["right_hand_dq"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.gripper_dofs // 2,), dtype=np.float32 + ) + self.observation_space["right_hand_ddq"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.gripper_dofs // 2,), dtype=np.float32 + ) + self.observation_space["right_hand_tau_est"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(self.gripper_nu // 2,), dtype=np.float32 + ) + + self.observation_space["language.language_instruction"] = spaces.Text( + max_length=256, charset=ALLOWED_LANGUAGE_CHARSET + ) + + # Add camera 
observation spaces + for camera_name, w, h in zip(self.camera_names, self.camera_widths, self.camera_heights): + k = self.camera_key_mapper.get_camera_config(camera_name)[0] + self.observation_space[f"{k}_image"] = spaces.Box( + low=0, high=255, shape=(h, w, 3), dtype=np.uint8 + ) + + # Add extra privileged observation spaces + if hasattr(self.env, "get_privileged_obs_keys"): + for key, shape in self.env.get_privileged_obs_keys().items(): + self.observation_space[key] = spaces.Box( + low=-np.inf, high=np.inf, shape=shape, dtype=np.float32 + ) + + # Add robot-specific observation spaces + if hasattr(self.env.robots[0].robot_model, "torso_body"): + self.observation_space["secondary_imu_quat"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32 + ) + self.observation_space["secondary_imu_vel"] = spaces.Box( + low=-np.inf, high=np.inf, shape=(6,), dtype=np.float32 + ) + + def get_action_space(self): + self.action_space = spaces.Dict( + {"q": spaces.Box(low=-np.inf, high=np.inf, shape=(self.total_dofs,), dtype=np.float32)} + ) + + def reset(self, seed=None, options=None): + raw_obs, info = super().reset(seed=seed, options=options) + obs = self.get_gr00t_observation(raw_obs) + + lang = self.env.get_ep_meta().get("lang", "") + ROBOSUITE_DEFAULT_LOGGER.info(f"Instruction: {lang}") + + return obs, info + + def step( + self, action: Dict[str, Any] + ) -> Tuple[Dict[str, Any], float, bool, bool, Dict[str, Any]]: + # action={"q": xxx, "tau": xxx} + for k, v in action.items(): + self.verbose and print("", k, v) + + joint_actoin_vec = action["q"] + action_dict = {} + for ii, robot in enumerate(self.env.robots): + pf = robot.robot_model.naming_prefix + _action_dict = self.obs_action_converter[ii].gr00t_to_robocasa_action_dict( + joint_actoin_vec + ) + action_dict.update({f"{pf}{k}": v for k, v in _action_dict.items()}) + if action.get("tau", None) is not None: + _torque_dict = self.obs_action_converter[ii].gr00t_to_robocasa_action_dict( + action["tau"] + ) + 
action_dict.update({f"{pf}{k}_tau": v for k, v in _torque_dict.items()}) + if self.overridden_floating_base_action is not None: + action_dict["robot0_base"] = self.overridden_floating_base_action + raw_obs, reward, terminated, truncated, info = super().step(action_dict) + obs = self.get_gr00t_observation(raw_obs) + + for k, v in obs.items(): + self.verbose and print("", k, v.shape if k.startswith("video.") else v) + self.verbose = False + + return obs, reward, terminated, truncated, info + + def step_only_kinematics( + self, action: Dict[str, Any] + ) -> Tuple[Dict[str, Any], float, bool, bool, Dict[str, Any]]: + joint_actoin_vec = action["q"] + for ii, robot in enumerate(self.env.robots): + joint_names = np.array(self.env.sim.model.joint_names)[robot._ref_joint_indexes] + body_q = self.obs_action_converter[ii].gr00t_to_robocasa_joint_order( + joint_names, joint_actoin_vec + ) + self.env.sim.data.qpos[robot._ref_joint_pos_indexes] = body_q + + for side in ["left", "right"]: + joint_names = np.array(self.env.sim.model.joint_names)[ + robot._ref_joints_indexes_dict[side + "_gripper"] + ] + gripper_q = self.obs_action_converter[ii].gr00t_to_robocasa_joint_order( + joint_names, joint_actoin_vec + ) + self.env.sim.data.qpos[robot._ref_gripper_joint_pos_indexes[side]] = gripper_q + + mujoco.mj_forward(self.env.sim.model._model, self.env.sim.data._data) + + obs = self.force_update_observation() + return obs, 0, False, False, {"success": False} + + def force_update_observation(self, timestep=0): + raw_obs = self.env._get_observations(force_update=True, timestep=timestep) + obs = self.get_basic_observation(raw_obs) + obs = self.get_gr00t_observation(obs) + return obs + + def get_basic_observation(self, raw_obs): + # this function takes a lot of time, so we disable it for now + # raw_obs.update(gather_robot_observations(self.env, format_gripper_space=False)) + + # Image are in (H, W, C), flip it upside down + def process_img(img): + return np.copy(img[::-1, :, :]) + + for 
obs_name, obs_value in raw_obs.items(): + if obs_name.endswith("_image"): + # image observations + raw_obs[obs_name] = process_img(obs_value) + else: + # non-image observations + raw_obs[obs_name] = obs_value.astype(np.float32) + + # Return black image if rendering is disabled + if not self.enable_render: + for ii, name in enumerate(self.camera_names): + raw_obs[f"{name}_image"] = np.zeros( + (self.camera_heights[ii], self.camera_widths[ii], 3), dtype=np.uint8 + ) + + self.render_cache = raw_obs[self.render_obs_key] + raw_obs["language"] = self.env.get_ep_meta().get("lang", "") + + return raw_obs + + def convert_body_q(self, q: np.ndarray) -> np.ndarray: + # q is in the order of the joints + robot = self.env.robots[0] + joint_names = np.array(self.env.sim.model.joint_names)[robot._ref_joint_indexes] + # this joint names are in the order of the obs_vec + actuated_q = self.obs_action_converter[0].robocasa_to_gr00t_actuated_order( + joint_names, q, "body" + ) + return actuated_q + + def convert_gripper_q(self, q: np.ndarray, side: str = "left") -> np.ndarray: + # q is in the order of the joints + robot = self.env.robots[0] + joint_names = np.array(self.env.sim.model.joint_names)[ + robot._ref_joints_indexes_dict[side + "_gripper"] + ] + actuated_q = self.obs_action_converter[0].robocasa_to_gr00t_actuated_order( + joint_names, q, side + "_gripper" + ) + return actuated_q + + def convert_gripper_tau(self, tau: np.ndarray, side: str = "left") -> np.ndarray: + # tau is in the order of the actuators + robot = self.env.robots[0] + actuator_idx = robot._ref_actuators_indexes_dict[side + "_gripper"] + actuated_joint_names = [ + self.env.sim.model.joint_id2name(self.env.sim.model.actuator_trnid[i][0]) + for i in actuator_idx + ] + actuated_tau = self.obs_action_converter[0].robocasa_to_gr00t_actuated_order( + actuated_joint_names, tau, side + "_gripper" + ) + return actuated_tau + + def get_gr00t_observation(self, raw_obs: Dict[str, Any]) -> Dict[str, Any]: + obs = {} + + if 
self.env.sim.model.jnt_type[0] == mujoco.mjtJoint.mjJNT_FREE: + # If the first joint is a free joint, use this way to get the floating base data + obs["floating_base_pose"] = self.env.sim.data.qpos[:7] + obs["floating_base_vel"] = self.env.sim.data.qvel[:6] + obs["floating_base_acc"] = self.env.sim.data.qacc[:6] + else: + # Otherwise, use self.env.sim.model to fetch the floating base pose + root_body_id = self.env.sim.model.body_name2id("robot0_base") + + # Get position and orientation from body state + root_pos = self.env.sim.data.body_xpos[root_body_id] + root_quat = self.env.sim.data.body_xquat[root_body_id] # quaternion in wxyz format + + # Combine position and quaternion to form 7-DOF pose + obs["floating_base_pose"] = np.concatenate([root_pos, root_quat]) + # set vel and acc to 0 + obs["floating_base_vel"] = np.zeros(6) + obs["floating_base_acc"] = np.zeros(6) + + obs["body_q"] = self.convert_body_q(raw_obs["robot0_joint_pos"]) + obs["body_dq"] = self.convert_body_q(raw_obs["robot0_joint_vel"]) + obs["body_ddq"] = self.convert_body_q(raw_obs["robot0_joint_acc"]) + + obs["left_hand_q"] = self.convert_gripper_q(raw_obs["robot0_left_gripper_qpos"], "left") + obs["left_hand_dq"] = self.convert_gripper_q(raw_obs["robot0_left_gripper_qvel"], "left") + obs["left_hand_ddq"] = self.convert_gripper_q(raw_obs["robot0_left_gripper_qacc"], "left") + obs["right_hand_q"] = self.convert_gripper_q(raw_obs["robot0_right_gripper_qpos"], "right") + obs["right_hand_dq"] = self.convert_gripper_q(raw_obs["robot0_right_gripper_qvel"], "right") + obs["right_hand_ddq"] = self.convert_gripper_q( + raw_obs["robot0_right_gripper_qacc"], "right" + ) + + robot = self.env.robots[0] + body_tau_idx_list = [] + left_gripper_tau_idx_list = [] + right_gripper_tau_idx_list = [] + for part_name, actuator_idx in robot._ref_actuators_indexes_dict.items(): + if "left_gripper" in part_name: + left_gripper_tau_idx_list.extend(actuator_idx) + elif "right_gripper" in part_name: + 
right_gripper_tau_idx_list.extend(actuator_idx) + elif "base" in part_name: + assert ( + len(actuator_idx) == 0 or robot.robot_model.default_base == "FloatingLeggedBase" + ) + else: + body_tau_idx_list.extend(actuator_idx) + + body_tau_idx_list = sorted(body_tau_idx_list) + left_gripper_tau_idx_list = sorted(left_gripper_tau_idx_list) + right_gripper_tau_idx_list = sorted(right_gripper_tau_idx_list) + obs["body_tau_est"] = self.convert_body_q( + self.env.sim.data.actuator_force[body_tau_idx_list] + ) + obs["right_hand_tau_est"] = self.convert_gripper_tau( + self.env.sim.data.actuator_force[right_gripper_tau_idx_list], "right" + ) + obs["left_hand_tau_est"] = self.convert_gripper_tau( + self.env.sim.data.actuator_force[left_gripper_tau_idx_list], "left" + ) + + obs["time"] = self.env.sim.data.time + + # Add camera images + for ii, camera_name in enumerate(self.camera_names): + mapped_camera_name = self.camera_key_mapper.get_camera_config(camera_name)[0] + obs[f"{mapped_camera_name}_image"] = raw_obs[f"{camera_name}_image"] + + # Add privileged observations + if hasattr(self.env, "get_privileged_obs_keys"): + for key in self.env.get_privileged_obs_keys(): + obs[key] = raw_obs[key] + + # Add robot-specific observations + if hasattr(self.env.robots[0].robot_model, "torso_body"): + obs["secondary_imu_quat"] = raw_obs["robot0_torso_link_imu_quat"] + obs["secondary_imu_vel"] = raw_obs["robot0_torso_link_imu_vel"] + + obs["language.language_instruction"] = raw_obs["language"] + + return obs diff --git a/decoupled_wbc/control/envs/robocasa/utils/robot_key_converter.py b/decoupled_wbc/control/envs/robocasa/utils/robot_key_converter.py new file mode 100644 index 0000000..3ee5911 --- /dev/null +++ b/decoupled_wbc/control/envs/robocasa/utils/robot_key_converter.py @@ -0,0 +1,301 @@ +from dataclasses import dataclass +from typing import Any, Dict, List, Tuple + +import numpy as np +from robocasa.models.robots import remove_mimic_joints +from robosuite.models.robots import 
RobotModel as RobosuiteRobotModel + +from decoupled_wbc.control.robot_model import RobotModel + + +class Gr00tJointInfo: + """ + Mapping from decoupled_wbc actuated joint names to robocasa joint names. + """ + + def __init__(self, robot_model: RobosuiteRobotModel): + self.robocasa_body_prefix = "robot0_" + self.robocasa_gripper_prefix = "gripper0_" + + self.robot_model: RobotModel = robot_model + self.body_actuated_joint_names: List[str] = ( + self.robot_model.supplemental_info.body_actuated_joints + ) + self.left_hand_actuated_joint_names: List[str] = ( + self.robot_model.supplemental_info.left_hand_actuated_joints + ) + self.right_hand_actuated_joint_names: List[str] = ( + self.robot_model.supplemental_info.right_hand_actuated_joints + ) + + self.actuated_joint_names: List[str] = self._get_gr00t_actuated_joint_names() + self.body_actuated_joint_to_index: Dict[str, int] = ( + self._get_gr00t_body_actuated_joint_name_to_index() + ) + self.gripper_actuated_joint_to_index: Tuple[Dict[str, int], Dict[str, int]] = ( + self._get_gr00t_gripper_actuated_joint_name_to_index() + ) + self.actuated_joint_name_to_index: Dict[str, int] = ( + self._get_gr00t_actuated_joint_name_to_index() + ) + + def _get_gr00t_actuated_joint_names(self) -> List[str]: + """Get list of gr00t actuated joint names ordered by their indices.""" + if self.robot_model.supplemental_info is None: + raise ValueError("Robot model must have supplemental_info") + + # Get joint names and indices + body_names = self.robot_model.supplemental_info.body_actuated_joints + left_hand_names = self.robot_model.supplemental_info.left_hand_actuated_joints + right_hand_names = self.robot_model.supplemental_info.right_hand_actuated_joints + + body_indices = self.robot_model.get_joint_group_indices("body") + left_hand_indices = self.robot_model.get_joint_group_indices("left_hand") + right_hand_indices = self.robot_model.get_joint_group_indices("right_hand") + + # Create a dictionary mapping index to name + index_to_name = 
{} + for name, idx in zip(body_names, body_indices): + index_to_name[idx] = self.robocasa_body_prefix + name + for name, idx in zip(left_hand_names, left_hand_indices): + index_to_name[idx] = self.robocasa_gripper_prefix + "left_" + name + for name, idx in zip(right_hand_names, right_hand_indices): + index_to_name[idx] = self.robocasa_gripper_prefix + "right_" + name + sorted_indices = sorted(index_to_name.keys()) + all_actuated_joint_names = [index_to_name[idx] for idx in sorted_indices] + return all_actuated_joint_names + + def _get_gr00t_body_actuated_joint_name_to_index(self) -> Dict[str, int]: + """Get dictionary mapping gr00t actuated joint names to indices.""" + if self.robot_model.supplemental_info is None: + raise ValueError("Robot model must have supplemental_info") + body_names = self.robot_model.supplemental_info.body_actuated_joints + body_indices = self.robot_model.get_joint_group_indices("body") + sorted_indices = np.argsort(body_indices) + sorted_names = [body_names[i] for i in sorted_indices] + return {self.robocasa_body_prefix + name: ii for ii, name in enumerate(sorted_names)} + + def _get_gr00t_gripper_actuated_joint_name_to_index( + self, + ) -> Tuple[Dict[str, int], Dict[str, int]]: + """Get dictionary mapping gr00t actuated joint names to indices.""" + if self.robot_model.supplemental_info is None: + raise ValueError("Robot model must have supplemental_info") + left_hand_names = self.robot_model.supplemental_info.left_hand_actuated_joints + right_hand_names = self.robot_model.supplemental_info.right_hand_actuated_joints + left_hand_indices = self.robot_model.get_joint_group_indices("left_hand") + right_hand_indices = self.robot_model.get_joint_group_indices("right_hand") + sorted_left_hand_indices = np.argsort(left_hand_indices) + sorted_right_hand_indices = np.argsort(right_hand_indices) + sorted_left_hand_names = [left_hand_names[i] for i in sorted_left_hand_indices] + sorted_right_hand_names = [right_hand_names[i] for i in 
sorted_right_hand_indices] + return ( + { + self.robocasa_gripper_prefix + "left_" + name: ii + for ii, name in enumerate(sorted_left_hand_names) + }, + { + self.robocasa_gripper_prefix + "right_" + name: ii + for ii, name in enumerate(sorted_right_hand_names) + }, + ) + + def _get_gr00t_actuated_joint_name_to_index(self) -> Dict[str, int]: + """Get dictionary mapping gr00t actuated joint names to indices.""" + return {name: ii for ii, name in enumerate(self.actuated_joint_names)} + + +@dataclass +class Gr00tObsActionConverter: + """ + Converter to align simulation environment joint action space with real environment joint action space. + Handles joint order and range conversion. + """ + + robot_model: RobotModel + robosuite_robot_model: RobosuiteRobotModel + robocasa_body_prefix: str = "robot0_" + robocasa_gripper_prefix: str = "gripper0_" + + def __post_init__(self): + """Initialize converter with robot configuration.""" + + self.robot_key = self.robot_model.supplemental_info.name + self.gr00t_joint_info = Gr00tJointInfo(self.robot_model) + self.robocasa_joint_names_for_each_part: Dict[str, List[str]] = ( + self._get_robocasa_joint_names_for_each_part() + ) + self.robocasa_actuator_names_for_each_part: Dict[str, List[str]] = ( + self._get_robotcasa_actuator_names_for_each_part() + ) + + # Store mappings directly as class attributes + self.gr00t_joint_name_to_index = self.gr00t_joint_info.actuated_joint_name_to_index + self.gr00t_body_joint_name_to_index = self.gr00t_joint_info.body_actuated_joint_to_index + self.gr00t_gripper_joint_name_to_index = { + "left": self.gr00t_joint_info.gripper_actuated_joint_to_index[0], + "right": self.gr00t_joint_info.gripper_actuated_joint_to_index[1], + } + self.gr00t_to_robocasa_actuator_indices = self._get_actuator_mapping() + + if self.robot_key == "GR1_Fourier": + self.joint_multiplier = ( + lambda x: np.array([-1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1]) * x + ) + self.actuator_multiplier = ( + lambda x: np.array([-1, -1, -1, 
-1, -1, -1, -1, -1, 1, 1, -1]) * x + ) + else: + self.joint_multiplier = lambda x: x + self.actuator_multiplier = lambda x: x + + # Store DOF counts directly + self.body_dof = len(self.gr00t_joint_info.body_actuated_joint_names) + self.gripper_dof = len(self.gr00t_joint_info.left_hand_actuated_joint_names) + len( + self.gr00t_joint_info.right_hand_actuated_joint_names + ) + self.whole_dof = self.body_dof + self.gripper_dof + self.body_nu = len(self.gr00t_joint_info.body_actuated_joint_names) + self.gripper_nu = len(self.gr00t_joint_info.left_hand_actuated_joint_names) + len( + self.gr00t_joint_info.right_hand_actuated_joint_names + ) + self.whole_nu = self.body_nu + self.gripper_nu + + def _get_robocasa_joint_names_for_each_part(self) -> Dict[str, List[str]]: + part_names = self.robosuite_robot_model._ref_joints_indexes_dict.keys() + robocasa_joint_names_for_each_part = {} + for part_name in part_names: + joint_indices = self.robosuite_robot_model._ref_joints_indexes_dict[part_name] + joint_names = [ + self.robosuite_robot_model.sim.model.joint_id2name(j) for j in joint_indices + ] + robocasa_joint_names_for_each_part[part_name] = joint_names + return robocasa_joint_names_for_each_part + + def _get_robotcasa_actuator_names_for_each_part(self) -> Dict[str, List[str]]: + part_names = self.robosuite_robot_model._ref_actuators_indexes_dict.keys() + robocasa_actuator_names_for_each_part = {} + for part_name in part_names: + if part_name == "base": + continue + actuator_indices = self.robosuite_robot_model._ref_actuators_indexes_dict[part_name] + actuator_names = [ + self.robosuite_robot_model.sim.model.actuator_id2name(j) for j in actuator_indices + ] + robocasa_actuator_names_for_each_part[part_name] = actuator_names + return robocasa_actuator_names_for_each_part + + def _get_actuator_mapping(self) -> Dict[str, List[int]]: + """Get mapping from decoupled_wbc actuatored joint order to robocasa actuatored joint order for whole body.""" + return { + part_name: [ + 
self.gr00t_joint_info.actuated_joint_name_to_index[j] + for j in self.robocasa_actuator_names_for_each_part[part_name] + ] + for part_name in self.robocasa_actuator_names_for_each_part.keys() + } + + def check_action_dim_match(self, vec_dim: int) -> bool: + """ + Check if input vector dimension matches expected dimension. + + Args: + vec_dim: Dimension of input vector + + Returns: + bool: True if dimensions match + """ + return vec_dim == self.whole_dof + + def gr00t_to_robocasa_action_dict(self, action_vec: np.ndarray) -> Dict[str, Any]: + """ + Convert gr00t flat action vector to robocasa dictionary mapping part names to actions. + + Args: + robot: Robocasa robot model instance + action_vec: Full action vector array in gr00t actuated joint order + + Returns: + dict: Mapping from part names to action vectors for robocasa + """ + if not self.check_action_dim_match(len(action_vec)): + raise ValueError( + f"Action vector dimension mismatch: {len(action_vec)} != {self.whole_dof}" + ) + + action_dict = {} + cc = self.robosuite_robot_model.composite_controller + + for part_name, controller in cc.part_controllers.items(): + if "gripper" in part_name: + robocasa_action = action_vec[self.gr00t_to_robocasa_actuator_indices[part_name]] + if self.actuator_multiplier is not None: + robocasa_action = self.actuator_multiplier(robocasa_action) + action_dict[part_name] = remove_mimic_joints( + cc.grippers[part_name], robocasa_action + ) + elif "base" in part_name: + assert ( + len(self.gr00t_to_robocasa_actuator_indices.get(part_name, [])) == 0 + or self.robosuite_robot_model.default_base == "FloatingLeggedBase" + ) + else: + action_dict[part_name] = action_vec[ + self.gr00t_to_robocasa_actuator_indices[part_name] + ] + + return action_dict + + def robocasa_to_gr00t_actuated_order( + self, joint_names: List[str], q: np.ndarray, obs_type: str = "body" + ) -> np.ndarray: + """ + Convert observation from robocasa joint order to gr00t actuated joint order. 
+ + Args: + joint_names: List of joint names in robocasa order (with prefixes) + q: Joint positions corresponding to joint_names + obs_type: Type of observation ("body", "left_gripper", "right_gripper", or "whole") + + Returns: + Joint positions in gr00t actuated joint order + """ + assert len(joint_names) == len(q), "Joint names and q must have the same length" + + if obs_type == "body": + actuated_q = np.zeros(self.body_dof) + for i, jn in enumerate(joint_names): + actuated_q[self.gr00t_body_joint_name_to_index[jn]] = q[i] + elif obs_type == "left_gripper": + actuated_q = np.zeros(self.gripper_dof // 2) + for i, jn in enumerate(joint_names): + actuated_q[self.gr00t_gripper_joint_name_to_index["left"][jn]] = q[i] + elif obs_type == "right_gripper": + actuated_q = np.zeros(self.gripper_dof // 2) + for i, jn in enumerate(joint_names): + actuated_q[self.gr00t_gripper_joint_name_to_index["right"][jn]] = q[i] + elif obs_type == "whole": + actuated_q = np.zeros(self.whole_dof) + for i, jn in enumerate(joint_names): + actuated_q[self.gr00t_joint_name_to_index[jn]] = q[i] + else: + raise ValueError(f"Unknown observation type: {obs_type}") + return actuated_q + + def gr00t_to_robocasa_joint_order( + self, joint_names: List[str], q_in_actuated_order: np.ndarray + ) -> np.ndarray: + """ + Convert gr00t actuated joint order to robocasa joint order. 
+ + Args: + joint_names: List of joint names in robocasa order (with prefixes) + q_in_actuated_order: Joint positions corresponding to joint_names in gr00t actuated joint order + + Returns: + Joint positions in robocasa joint order + """ + q = np.zeros(len(joint_names)) + for i, jn in enumerate(joint_names): + q[i] = q_in_actuated_order[self.gr00t_joint_name_to_index[jn]] + return q diff --git a/gr00t_wbc/control/envs/robocasa/utils/sim_utils.py b/decoupled_wbc/control/envs/robocasa/utils/sim_utils.py similarity index 100% rename from gr00t_wbc/control/envs/robocasa/utils/sim_utils.py rename to decoupled_wbc/control/envs/robocasa/utils/sim_utils.py diff --git a/gr00t_wbc/control/main/__init__.py b/decoupled_wbc/control/main/__init__.py similarity index 100% rename from gr00t_wbc/control/main/__init__.py rename to decoupled_wbc/control/main/__init__.py diff --git a/gr00t_wbc/control/main/config_template.py b/decoupled_wbc/control/main/config_template.py similarity index 100% rename from gr00t_wbc/control/main/config_template.py rename to decoupled_wbc/control/main/config_template.py diff --git a/gr00t_wbc/control/main/constants.py b/decoupled_wbc/control/main/constants.py similarity index 100% rename from gr00t_wbc/control/main/constants.py rename to decoupled_wbc/control/main/constants.py diff --git a/gr00t_wbc/control/main/teleop/__init__.py b/decoupled_wbc/control/main/teleop/__init__.py similarity index 100% rename from gr00t_wbc/control/main/teleop/__init__.py rename to decoupled_wbc/control/main/teleop/__init__.py diff --git a/decoupled_wbc/control/main/teleop/configs/configs.py b/decoupled_wbc/control/main/teleop/configs/configs.py new file mode 100644 index 0000000..6491086 --- /dev/null +++ b/decoupled_wbc/control/main/teleop/configs/configs.py @@ -0,0 +1,483 @@ +from dataclasses import dataclass +import os +from pathlib import Path +from typing import Literal, Optional + +import yaml + +import decoupled_wbc +from decoupled_wbc.control.main.config_template 
import ArgsConfig as ArgsConfigTemplate +from decoupled_wbc.control.policy.wbc_policy_factory import WBC_VERSIONS +from decoupled_wbc.control.utils.network_utils import resolve_interface + + +def override_wbc_config( + wbc_config: dict, config: "BaseConfig", missed_keys_only: bool = False +) -> dict: + """Override WBC YAML values with dataclass values. + + Args: + wbc_config: The loaded WBC YAML configuration dictionary + config: The BaseConfig dataclass instance with override values + missed_keys_only: If True, only add keys that don't exist in wbc_config. + If False, validate all keys exist and override all. + + Returns: + Updated wbc_config dictionary with overridden values + + Raises: + KeyError: If any required keys are missing from the WBC YAML configuration + (only when missed_keys_only=False) + """ + # Override yaml values with dataclass values + key_to_value = { + "INTERFACE": config.interface, + "ENV_TYPE": config.env_type, + "VERSION": config.wbc_version, + "SIMULATOR": config.simulator, + "SIMULATE_DT": 1 / float(config.sim_frequency), + "ENABLE_OFFSCREEN": config.enable_offscreen, + "ENABLE_ONSCREEN": config.enable_onscreen, + "model_path": config.wbc_model_path, + "enable_waist": config.enable_waist, + "with_hands": config.with_hands, + "verbose": config.verbose, + "verbose_timing": config.verbose_timing, + "upper_body_max_joint_speed": config.upper_body_joint_speed, + "keyboard_dispatcher_type": config.keyboard_dispatcher_type, + "enable_gravity_compensation": config.enable_gravity_compensation, + "gravity_compensation_joints": config.gravity_compensation_joints, + "high_elbow_pose": config.high_elbow_pose, + } + + if missed_keys_only: + # Only add keys that don't exist in wbc_config + for key in key_to_value: + if key not in wbc_config: + wbc_config[key] = key_to_value[key] + else: + # Set all keys (overwrite existing) + for key in key_to_value: + wbc_config[key] = key_to_value[key] + + # g1 kp, kd, sim2real gap + if config.env_type == "real": + # 
update waist pitch damping, index 14 + wbc_config["MOTOR_KD"][14] = wbc_config["MOTOR_KD"][14] - 10 + + return wbc_config + + +@dataclass +class BaseConfig(ArgsConfigTemplate): + """Base config inherited by all G1 control loops""" + + # WBC Configuration + wbc_version: Literal[tuple(WBC_VERSIONS)] = "gear_wbc" + """Version of the whole body controller.""" + + wbc_model_path: str = ( + "policy/GR00T-WholeBodyControl-Balance.onnx," "policy/GR00T-WholeBodyControl-Walk.onnx" + ) + """Path to WBC model file (relative to decoupled_wbc/sim2mujoco/resources/robots/g1)""" + """gear_wbc model path: policy/GR00T-WholeBodyControl-Balance.onnx,policy/GR00T-WholeBodyControl-Walk.onnx""" + + wbc_policy_class: str = "G1DecoupledWholeBodyPolicy" + """Whole body policy class.""" + + # System Configuration + interface: str = "sim" + """Interface to use for the control loop. [sim, real, lo, enxe8ea6a9c4e09]""" + + simulator: str = "mujoco" + """Simulator to use.""" + + sim_sync_mode: bool = False + """Whether to run the control loop in sync mode.""" + + control_frequency: int = 50 + """Frequency of the control loop.""" + + sim_frequency: int = 200 + """Frequency of the simulation loop.""" + + # Robot Configuration + enable_waist: bool = False + """Whether to include waist joints in IK.""" + + with_hands: bool = True + """Enable hand functionality. 
When False, robot operates without hands.""" + + high_elbow_pose: bool = False + """Enable high elbow pose configuration for default joint positions.""" + + verbose: bool = True + """Whether to print verbose output.""" + + # Additional common fields + enable_offscreen: bool = False + """Whether to enable offscreen rendering.""" + + enable_onscreen: bool = True + """Whether to enable onscreen rendering.""" + + upper_body_joint_speed: float = 1000 + """Upper body joint speed.""" + + env_name: str = "default" + """Environment name.""" + + ik_indicator: bool = False + """Whether to draw IK indicators.""" + + verbose_timing: bool = False + """Enable verbose timing output every iteration.""" + + keyboard_dispatcher_type: str = "raw" + """Keyboard dispatcher to use. [raw, ros]""" + + # Gravity Compensation Configuration + enable_gravity_compensation: bool = False + """Enable gravity compensation using pinocchio dynamics.""" + + gravity_compensation_joints: Optional[list[str]] = None + """Joint groups to apply gravity compensation to (e.g., ['arms', 'left_arm', 'right_arm']).""" + # Teleop/Device Configuration + body_control_device: str = "dummy" + """Device to use for body control. Options: dummy, vive, iphone, leapmotion, joycon.""" + + hand_control_device: Optional[str] = "dummy" + """Device to use for hand control. 
Options: None, manus, joycon, iphone.""" + + body_streamer_ip: str = "10.112.210.229" + """IP address for body streamer (vive only).""" + + body_streamer_keyword: str = "knee" + """Body streamer keyword (vive only).""" + + enable_visualization: bool = False + """Whether to enable visualization.""" + + enable_real_device: bool = True + """Whether to enable real device.""" + + teleop_frequency: int = 20 + """Teleoperation frequency (Hz).""" + + teleop_replay_path: Optional[str] = None + """Path to teleop replay data.""" + + # Deployment/Camera Configuration + robot_ip: str = "192.168.123.164" + """Robot IP address""" + # Data collection settings + data_collection: bool = True + """Enable data collection""" + + data_collection_frequency: int = 20 + """Data collection frequency (Hz)""" + + root_output_dir: str = "outputs" + """Root output directory""" + + # Policy settings + enable_upper_body_operation: bool = True + """Enable upper body operation""" + + upper_body_operation_mode: Literal["teleop", "inference"] = "teleop" + """Upper body operation mode""" + + def __post_init__(self): + # Resolve interface (handles sim/real shortcuts, platform differences, and error handling) + self.interface, self.env_type = resolve_interface(self.interface) + + def load_wbc_yaml(self) -> dict: + """Load and merge wbc yaml with dataclass overrides""" + # Get the base path to decoupled_wbc and convert to Path object + package_path = Path(os.path.dirname(decoupled_wbc.__file__)) + + if self.wbc_version == "gear_wbc": + config_path = str(package_path / "control/main/teleop/configs/g1_29dof_gear_wbc.yaml") + else: + raise ValueError( + f"Invalid wbc_version: {self.wbc_version}, please use one of: " f"gear_wbc" + ) + + with open(config_path) as file: + wbc_config = yaml.load(file, Loader=yaml.FullLoader) + + # Override yaml values with dataclass values + wbc_config = override_wbc_config(wbc_config, self) + + return wbc_config + + +@dataclass +class ControlLoopConfig(BaseConfig): + """Config 
for running the G1 control loop.""" + + pass + + +@dataclass +class TeleopConfig(BaseConfig): + """Config for running the G1 teleop policy loop.""" + + robot: Literal["g1"] = "g1" + """Name of the robot to use, e.g., 'g1'.""" + + lerobot_replay_path: Optional[str] = None + """Path to lerobot replay data.""" + + # Override defaults for teleop-specific values + body_streamer_ip: str = "10.110.67.24" + """IP address for body streamer (vive only).""" + + body_streamer_keyword: str = "foot" + """Keyword for body streamer (vive only).""" + + teleop_frequency: float = 20 # Override to be float instead of int + """Frequency of the teleop loop.""" + + binary_hand_ik: bool = True + """Whether to use binary IK.""" + + +@dataclass +class ComposedCameraClientConfig: + """Config for running the composed camera client.""" + + camera_port: int = 5555 + """Port number""" + + camera_host: str = "localhost" + """Host IP address""" + + fps: float = 20.0 + """FPS of the camera viewer""" + + +@dataclass +class DataExporterConfig(BaseConfig, ComposedCameraClientConfig): + """Config for running the G1 data exporter.""" + + dataset_name: Optional[str] = None + """Name of the dataset to save the data to. If the dataset already exists, + the new episodes will be appended to existing dataset. If the dataset does not exist, + episodes will be saved under root_output_dir/dataset_name. 
+ + task_prompt: str = "demo" + """Language Task prompt for the dataset.""" + + state_dim: int = 43 + """Size of the state.""" + + action_dim: int = 43 + """Size of the action.""" + + teleoperator_username: Optional[str] = None + """Teleoperator username.""" + + support_operator_username: Optional[str] = None + """Support operator username.""" + + robot_id: Optional[str] = None + """Robot ID.""" + + lower_body_policy: Optional[str] = None + """Lower body policy.""" + + img_stream_viewer: bool = False + """Whether to open a matplotlib window to view the camera images.""" + + text_to_speech: bool = True + """Whether to use text-to-speech for voice feedback.""" + + add_stereo_camera: bool = True + """Whether to add stereo camera for data collection. If False, only use a single ego view camera.""" + + +@dataclass +class SyncSimDataCollectionConfig(ControlLoopConfig, TeleopConfig): + """Args Config for running the data collection loop.""" + + robot: str = "G1" + """Name of the robot to collect data for (e.g., G1 variants).""" + + task_name: str = "GroundOnly" + """Name of the task to collect data for. [PnPBottle, GroundOnly, ...]""" + + body_control_device: str = "dummy" + """Device to use for body control. Options: dummy, vive, iphone, leapmotion, joycon.""" + + hand_control_device: Optional[str] = "dummy" + """Device to use for hand control. 
Options: None, manus, joycon, iphone.""" + + remove_existing_dir: bool = False + """Whether to remove existing output directory if it exists.""" + + hardcode_teleop_cmd: bool = False + """Whether to hardcode the teleop command for testing purposes.""" + + ik_indicator: bool = False + """Whether to draw IK indicators.""" + + enable_onscreen: bool = True + """Whether to show the onscreen rendering.""" + + save_img_obs: bool = False + """Whether to save image observations.""" + + success_hold_steps: int = 50 + """Number of steps to collect after task completion before saving.""" + + renderer: Literal["mjviewer", "mujoco", "rerun"] = "mjviewer" + """Renderer to use for the environment. """ + + replay_data_path: str | None = None + """Path to the data (.pkl) to replay. If None, will not replay. Used for CI/CD.""" + + replay_speed: float = 2.5 + """Speed multiplier for replay data. Higher values make replay slower (e.g., 2.5 for sync sim tests).""" + + ci_test: bool = False + """Whether to run the CI test.""" + + ci_test_mode: Literal["unit", "pre_merge"] = "pre_merge" + """'unit' for fast 50-step tests, 'pre_merge' for 500-step test with tracking checks.""" + + manual_control: bool = False + """Enable manual control of data collection start/save. When True, use toggle_data_collection + to manually control episode states (idle -> recording -> need_to_save -> idle). 
+ When False (default), automatically starts and stops data collection based on task completion.""" + + +@dataclass +class SyncSimPlaybackConfig(SyncSimDataCollectionConfig): + """Configuration class for playback script arguments.""" + + enable_real_device: bool = False + """Whether to enable real device""" + + dataset: str | None = None + """Path to the demonstration dataset, either an HDF5 file or a LeRobot folder path.""" + + use_actions: bool = False + """Whether to use actions for playback""" + + use_wbc_goals: bool = False + """Whether to use WBC goals for control""" + + use_teleop_cmd: bool = False + """Whether to use teleop IK for action generation""" + + # Video recording arguments. + # Warning: enabling this key will lead to divergence between playback and recording. + save_video: bool = False + """Whether to save video of the playback""" + + # Saving to LeRobot dataset. + # Warning: enabling this key will lead to divergence between playback and recording. + save_lerobot: bool = False + """Whether to save the playback as a new LeRobot dataset""" + + video_path: str | None = None + """Path to save the output video. 
If not specified, + will use the nearest folder to dataset and save as playback_video.mp4""" + + num_episodes: int = 1 + """Number of episodes to load and playback/record (loads only the first N episodes from dataset)""" + + intervention: bool = False + """Whether to denote intervention timesteps with colored borders in video frames""" + + ci_test: bool = False + """Whether this is a CI test run, which limits the number of steps for testing purposes""" + + def validate_args(self): + # Validate argument combinations + if self.use_teleop_cmd and not self.use_actions: + raise ValueError("--use-teleop-cmd requires --use-actions to be set") + + # Note: using teleop cmd has playback divergence unlike using wbc goals, as TeleopPolicy has a warmup loop + if self.use_teleop_cmd and self.use_wbc_goals: + raise ValueError("--use-teleop-cmd and --use-wbc-goals are mutually exclusive") + + if (self.use_teleop_cmd or self.use_wbc_goals) and not self.use_actions: + raise ValueError( + "You are using --use-teleop-cmd or --use-wbc-goals but not --use-actions. " + "This will not play back actions whether via teleop or wbc goals. " + "Instead, it'll play back states only." 
+ ) + + if self.save_img_obs and not self.save_lerobot: + raise ValueError("--save-img-obs is only supported with --save-lerobot") + + if self.intervention and not self.save_video: + raise ValueError("--intervention requires --save-video to be enabled for visualization") + + +@dataclass +class WebcamRecorderConfig(BaseConfig): + """Config for running the webcam recorder.""" + + output_dir: str = "logs_experiment" + """Output directory for webcam recordings""" + + device_id: int = 0 + """Camera device ID""" + + fps: int = 30 + """Recording frame rate""" + + duration: Optional[int] = None + """Recording duration in seconds (None for continuous)""" + + +@dataclass +class SimLoopConfig(BaseConfig): + """Config for running the simulation loop.""" + + mp_start_method: str = "spawn" + """Multiprocessing start method""" + + enable_image_publish: bool = False + """Enable image publishing in simulation""" + + camera_port: int = 5555 + """Camera port for image publishing""" + + verbose: bool = False + """Verbose output, override the base config verbose""" + + +@dataclass +class DeploymentConfig(BaseConfig, ComposedCameraClientConfig): + """G1 Robot Deployment Configuration + + Simplified deployment config that inherits all common fields from G1BaseConfig. + All deployment settings are now available in the base config. + """ + + camera_publish_rate: float = 30.0 + """Camera publish rate (Hz)""" + + view_camera: bool = True + """Enable camera viewer""" + # Webcam recording settings + enable_webcam_recording: bool = True + """Enable webcam recording for real robot deployment monitoring""" + + webcam_output_dir: str = "logs_experiment" + """Output directory for webcam recordings""" + + skip_img_transform: bool = False + """Skip image transformation in the model (for faster internet)""" + + sim_in_single_process: bool = False + """Run simulator in a separate process. 
When True, sets simulator to None in main control loop + and launches run_sim_loop.py separately.""" + + image_publish: bool = False + """Enable image publishing in simulation loop (passed to run_sim_loop.py)""" diff --git a/decoupled_wbc/control/main/teleop/configs/g1_29dof_gear_wbc.yaml b/decoupled_wbc/control/main/teleop/configs/g1_29dof_gear_wbc.yaml new file mode 100644 index 0000000..e7c0363 --- /dev/null +++ b/decoupled_wbc/control/main/teleop/configs/g1_29dof_gear_wbc.yaml @@ -0,0 +1,421 @@ +GEAR_WBC_CONFIG: "decoupled_wbc/sim2mujoco/resources/robots/g1/g1_gear_wbc.yaml" + +# copy from g1_43dof_hist.yaml +ROBOT_TYPE: 'g1_29dof' # Robot name, "go2", "b2", "b2w", "h1", "go2w", "g1" +ROBOT_SCENE: "decoupled_wbc/control/robot_model/model_data/g1/scene_43dof.xml" # Robot scene, for Sim2Sim +# ROBOT_SCENE: "decoupled_wbc/control/robot_model/model_data/g1/scene_29dof_activated3dex.xml" + +DOMAIN_ID: 0 # Domain id +# Network Interface, "lo" for simulation and the one with "192.168.123.222" for real robot +# INTERFACE: "enxe8ea6a9c4e09" +# INTERFACE: "enxc8a3623c9cb7" +INTERFACE: "lo" +SIMULATOR: "mujoco" # "robocasa" + +USE_JOYSTICK: 0 # Simulate Unitree WirelessController using a gamepad (0: disable, 1: enable) +JOYSTICK_TYPE: "xbox" # support "xbox" and "switch" gamepad layout +JOYSTICK_DEVICE: 0 # Joystick number + +FREE_BASE: False + +PRINT_SCENE_INFORMATION: True # Print link, joint and sensors information of robot +ENABLE_ELASTIC_BAND: True # Virtual spring band, used for lifting h1 + +SIMULATE_DT: 0.005 # Need to be larger than the runtime of viewer.sync() +VIEWER_DT: 0.02 # Viewer update time +REWARD_DT: 0.02 +USE_SENSOR: False +USE_HISTORY: True +USE_HISTORY_LOCO: True +USE_HISTORY_MIMIC: True + +GAIT_PERIOD: 0.9 # 1.25 + +MOTOR2JOINT: [0, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, + 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28] + +JOINT2MOTOR: [0, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, + 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, + 22, 
23, 24, 25, 26, 27, 28] + + +UNITREE_LEGGED_CONST: + HIGHLEVEL: 0xEE + LOWLEVEL: 0xFF + TRIGERLEVEL: 0xF0 + PosStopF: 2146000000.0 + VelStopF: 16000.0 + MODE_MACHINE: 5 + MODE_PR: 0 + +JOINT_KP: [ + 100, 100, 100, 200, 20, 20, + 100, 100, 100, 200, 20, 20, + 400, 400, 400, + 90, 60, 20, 60, 4, 4, 4, + 90, 60, 20, 60, 4, 4, 4 +] + + +JOINT_KD: [ + 2.5, 2.5, 2.5, 5, 0.2, 0.1, + 2.5, 2.5, 2.5, 5, 0.2, 0.1, + 5.0, 5.0, 5.0, + 2.0, 1.0, 0.4, 1.0, 0.2, 0.2, 0.2, + 2.0, 1.0, 0.4, 1.0, 0.2, 0.2, 0.2 +] + +# arm kp +# soft kp, safe, test it first +# 50, 50, 20, 20, 10, 10, 10 +# hard kp, use only if policy is safe +# 200, 200, 80, 80, 50, 50, 50, + +# MOTOR_KP: [ +# 100, 100, 100, 200, 20, 20, +# 100, 100, 100, 200, 20, 20, +# 400, 400, 400, +# 50, 50, 20, 20, 10, 10, 10, +# 50, 50, 20, 20, 10, 10, 10 +# ] + +MOTOR_KP: [ + 150, 150, 150, 200, 40, 40, + 150, 150, 150, 200, 40, 40, + 250, 250, 250, + 100, 100, 40, 40, 20, 20, 20, + 100, 100, 40, 40, 20, 20, 20 +] + +MOTOR_KD: [ + 2, 2, 2, 4, 2, 2, + 2, 2, 2, 4, 2, 2, + 5, 5, 5, + 5, 5, 2, 2, 2, 2, 2, + 5, 5, 2, 2, 2, 2, 2 +] + +# MOTOR_KP: [ +# 100, 100, 100, 200, 20, 20, +# 100, 100, 100, 200, 20, 20, +# 400, 400, 400, +# 90, 60, 20, 60, 4, 4, 4, +# 90, 60, 20, 60, 4, 4, 4 +# ] + + +# MOTOR_KD: [ +# 2.5, 2.5, 2.5, 5, 0.2, 0.1, +# 2.5, 2.5, 2.5, 5, 0.2, 0.1, +# 5.0, 5.0, 5.0, +# 2.0, 1.0, 0.4, 1.0, 0.2, 0.2, 0.2, +# 2.0, 1.0, 0.4, 1.0, 0.2, 0.2, 0.2 +# ] + + +WeakMotorJointIndex: + left_hip_yaw_joint: 0 + left_hip_roll_joint: 1 + left_hip_pitch_joint: 2 + left_knee_joint: 3 + left_ankle_pitch_joint: 4 + left_ankle_roll_joint: 5 + right_hip_yaw_joint: 6 + right_hip_roll_joint: 7 + right_hip_pitch_joint: 8 + right_knee_joint: 9 + right_ankle_pitch_joint: 10 + right_ankle_roll_joint: 11 + waist_yaw_joint : 12 + waist_roll_joint : 13 + waist_pitch_joint : 14 + left_shoulder_pitch_joint: 15 + left_shoulder_roll_joint: 16 + left_shoulder_yaw_joint: 17 + left_elbow_joint: 18 + left_wrist_roll_joint: 19 + left_wrist_pitch_joint: 20 + 
left_wrist_yaw_joint: 21 + right_shoulder_pitch_joint: 22 + right_shoulder_roll_joint: 23 + right_shoulder_yaw_joint: 24 + right_elbow_joint: 25 + right_wrist_roll_joint: 26 + right_wrist_pitch_joint: 27 + right_wrist_yaw_joint: 28 + +NUM_MOTORS: 29 +NUM_JOINTS: 29 +NUM_HAND_MOTORS: 7 +NUM_HAND_JOINTS: 7 +NUM_UPPER_BODY_JOINTS: 17 + +DEFAULT_DOF_ANGLES: [ + -0.1, # left_hip_pitch_joint + 0.0, # left_hip_roll_joint + 0.0, # left_hip_yaw_joint + 0.3, # left_knee_joint + -0.2, # left_ankle_pitch_joint + 0.0, # left_ankle_roll_joint + -0.1, # right_hip_pitch_joint + 0.0, # right_hip_roll_joint + 0.0, # right_hip_yaw_joint + 0.3, # right_knee_joint + -0.2, # right_ankle_pitch_joint + 0.0, # right_ankle_roll_joint + 0.0, # waist_yaw_joint + 0.0, # waist_roll_joint + 0.0, # waist_pitch_joint + 0.0, # left_shoulder_pitch_joint + 0.0, # left_shoulder_roll_joint + 0.0, # left_shoulder_yaw_joint + 0.0, # left_elbow_joint + 0.0, # left_wrist_roll_joint + 0.0, # left_wrist_pitch_joint + 0.0, # left_wrist_yaw_joint + 0.0, # right_shoulder_pitch_joint + 0.0, # right_shoulder_roll_joint + 0.0, # right_shoulder_yaw_joint + 0.0, # right_elbow_joint + 0.0, # right_wrist_roll_joint + 0.0, # right_wrist_pitch_joint + 0.0 # right_wrist_yaw_joint +] + +DEFAULT_MOTOR_ANGLES: [ + -0.1, # left_hip_pitch_joint + 0.0, # left_hip_roll_joint + 0.0, # left_hip_yaw_joint + 0.3, # left_knee_joint + -0.2, # left_ankle_pitch_joint + 0.0, # left_ankle_roll_joint + -0.1, # right_hip_pitch_joint + 0.0, # right_hip_roll_joint + 0.0, # right_hip_yaw_joint + 0.3, # right_knee_joint + -0.2, # right_ankle_pitch_joint + 0.0, # right_ankle_roll_joint + 0.0, # waist_yaw_joint + 0.0, # waist_roll_joint + 0.0, # waist_pitch_joint + 0.0, # left_shoulder_pitch_joint + 0.0, # left_shoulder_roll_joint + 0.0, # left_shoulder_yaw_joint + 0.0, # left_elbow_joint + 0.0, # left_wrist_roll_joint + 0.0, # left_wrist_pitch_joint + 0.0, # left_wrist_yaw_joint + 0.0, # right_shoulder_pitch_joint + 0.0, # 
right_shoulder_roll_joint + 0.0, # right_shoulder_yaw_joint + 0.0, # right_elbow_joint + 0.0, # right_wrist_roll_joint + 0.0, # right_wrist_pitch_joint + 0.0 # right_wrist_yaw_joint +] + +motor_pos_lower_limit_list: [-2.5307, -0.5236, -2.7576, -0.087267, -0.87267, -0.2618, + -2.5307, -2.9671, -2.7576, -0.087267, -0.87267, -0.2618, + -2.618, -0.52, -0.52, + -3.0892, -1.5882, -2.618, -1.0472, + -1.972222054, -1.61443, -1.61443, + -3.0892, -2.2515, -2.618, -1.0472, + -1.972222054, -1.61443, -1.61443] +motor_pos_upper_limit_list: [2.8798, 2.9671, 2.7576, 2.8798, 0.5236, 0.2618, + 2.8798, 0.5236, 2.7576, 2.8798, 0.5236, 0.2618, + 2.618, 0.52, 0.52, + 2.6704, 2.2515, 2.618, 2.0944, + 1.972222054, 1.61443, 1.61443, + 2.6704, 1.5882, 2.618, 2.0944, + 1.972222054, 1.61443, 1.61443] +motor_vel_limit_list: [32.0, 32.0, 32.0, 20.0, 37.0, 37.0, + 32.0, 32.0, 32.0, 20.0, 37.0, 37.0, + 32.0, 37.0, 37.0, + 37.0, 37.0, 37.0, 37.0, + 37.0, 22.0, 22.0, + 37.0, 37.0, 37.0, 37.0, + 37.0, 22.0, 22.0] +motor_effort_limit_list: [88.0, 88.0, 88.0, 139.0, 50.0, 50.0, + 88.0, 88.0, 88.0, 139.0, 50.0, 50.0, + 88.0, 50.0, 50.0, + 25.0, 25.0, 25.0, 25.0, + 25.0, 5.0, 5.0, + 2.45, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, + 25.0, 25.0, 25.0, 25.0, + 25.0, 5.0, 5.0, + 2.45, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7] +history_config: { + base_ang_vel: 4, + projected_gravity: 4, + command_lin_vel: 4, + command_ang_vel: 4, + command_base_height: 4, + command_stand: 4, + ref_upper_dof_pos: 4, + dof_pos: 4, + dof_vel: 4, + actions: 4, + # phase_time: 4, + ref_motion_phase: 4, + sin_phase: 4, + cos_phase: 4 + } +history_loco_config: { + base_ang_vel: 4, + projected_gravity: 4, + command_lin_vel: 4, + command_ang_vel: 4, + # command_base_height: 4, + command_stand: 4, + ref_upper_dof_pos: 4, + dof_pos: 4, + dof_vel: 4, + actions: 4, + # phase_time: 4, + sin_phase: 4, + cos_phase: 4 + } +history_loco_height_config: { + base_ang_vel: 4, + projected_gravity: 4, + command_lin_vel: 4, + command_ang_vel: 4, + command_base_height: 4, 
+ command_stand: 4, + ref_upper_dof_pos: 4, + dof_pos: 4, + dof_vel: 4, + actions: 4, + # phase_time: 4, + sin_phase: 4, + cos_phase: 4 + } +history_mimic_config: { + base_ang_vel: 4, + projected_gravity: 4, + dof_pos: 4, + dof_vel: 4, + actions: 4, + ref_motion_phase: 4, + } +obs_dims: { + base_lin_vel: 3, + base_ang_vel: 3, + projected_gravity: 3, + command_lin_vel: 2, + command_ang_vel: 1, + command_stand: 1, + command_base_height: 1, + ref_upper_dof_pos: 17, # upper body actions + dof_pos: 29, + dof_vel: 29, + # actions: 12, # lower body actions + actions: 29, # full body actions + phase_time: 1, + ref_motion_phase: 1, # mimic motion phase + sin_phase: 1, + cos_phase: 1, + } +obs_loco_dims: { + base_lin_vel: 3, + base_ang_vel: 3, + projected_gravity: 3, + command_lin_vel: 2, + command_ang_vel: 1, + command_stand: 1, + command_base_height: 1, + ref_upper_dof_pos: 17, # upper body actions + dof_pos: 29, + dof_vel: 29, + actions: 12, # lower body actions + phase_time: 1, + sin_phase: 1, + cos_phase: 1, + } +obs_mimic_dims: { + base_lin_vel: 3, + base_ang_vel: 3, + projected_gravity: 3, + dof_pos: 29, + dof_vel: 29, + actions: 29, # full body actions + ref_motion_phase: 1, # mimic motion phase + } +obs_scales: { + base_lin_vel: 2.0, + base_ang_vel: 0.25, + projected_gravity: 1.0, + command_lin_vel: 1, + command_ang_vel: 1, + command_stand: 1, + command_base_height: 2, # Yuanhang: it's 2, not 1! 
+ ref_upper_dof_pos: 1.0, + dof_pos: 1.0, + dof_vel: 0.05, + history: 1.0, + history_loco: 1.0, + history_mimic: 1.0, + actions: 1.0, + phase_time: 1.0, + ref_motion_phase: 1.0, + sin_phase: 1.0, + cos_phase: 1.0 + } + +loco_upper_body_dof_pos: [ + 0.0, 0.0, 0.0, # waist + 0.0, 0.3, 0.0, 1.0, # left shoulder and elbow + 0.0, 0.0, 0.0, # left wrist + 0.0, -0.3, 0.0, 1.0, # right shoulder and elbow + 0.0, 0.0, 0.0 # right wrist +] + +robot_dofs: { + "g1_29dof": [1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, + 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1], + "g1_29dof_anneal_23dof": [1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, + 1, 1, 1, + 1, 1, 1, 1, 0, 0, 0, + 1, 1, 1, 1, 0, 0, 0], +} + +mimic_robot_types: { + + "APT_level1": "g1_29dof_anneal_23dof", +} + + + + +# 01281657 +mimic_models: { + "APT_level1": "20250116_225127-TairanTestbed_G129dofANNEAL23dof_dm_APT_video_APT_level1_MinimalFriction-0.3_RfiTrue_Far0.325_RESUME_LARGENOISE-motion_tracking-g1_29dof_anneal_23dof/exported/model_176500.onnx", + +} + + + +start_upper_body_dof_pos: { + + "APT_level1": + [0.19964170455932617, 0.07710712403059006, -0.2882401943206787, + 0.21672365069389343, 0.15629297494888306, -0.5167576670646667, 0.5782126784324646, + 0.0, 0.0, 0.0, + 0.25740593671798706, -0.2504104673862457, 0.22500675916671753, 0.5127624273300171, + 0.0, 0.0, 0.0], + +} + +motion_length_s: { + "APT_level1": 7.66, + +} diff --git a/gr00t_wbc/control/main/teleop/configs/g1_gear_wbc.yaml b/decoupled_wbc/control/main/teleop/configs/g1_gear_wbc.yaml similarity index 100% rename from gr00t_wbc/control/main/teleop/configs/g1_gear_wbc.yaml rename to decoupled_wbc/control/main/teleop/configs/g1_gear_wbc.yaml diff --git a/gr00t_wbc/control/main/teleop/configs/identifiers.py b/decoupled_wbc/control/main/teleop/configs/identifiers.py similarity index 100% rename from gr00t_wbc/control/main/teleop/configs/identifiers.py rename to decoupled_wbc/control/main/teleop/configs/identifiers.py diff --git 
a/decoupled_wbc/control/main/teleop/playback_sync_sim_data.py b/decoupled_wbc/control/main/teleop/playback_sync_sim_data.py new file mode 100644 index 0000000..da8c241 --- /dev/null +++ b/decoupled_wbc/control/main/teleop/playback_sync_sim_data.py @@ -0,0 +1,627 @@ +""" +A convenience script to playback random demonstrations using the decoupled_wbc controller from +a set of demonstrations stored in a hdf5 file. + +Arguments: + --dataset (str): Path to demonstrations + --use-actions (optional): If this flag is provided, the actions are played back + through the MuJoCo simulator, instead of loading the simulator states + one by one. + --use-wbc-goals (optional): If set, will use the stored WBC goals to control the robot, + otherwise will use the actions directly. Only relevant if --use-actions is set. + --use-teleop-cmd (optional): If set, will use teleop IK directly with WBC timing + for action generation. Only relevant if --use-actions is set. + --visualize-gripper (optional): If set, will visualize the gripper site + --save-video (optional): If set, will save video of the playback using offscreen rendering + --video-path (optional): Path to save the output video. 
If not specified, will use the nearest + folder to dataset and save as playback_video.mp4 + --num-episodes (optional): Number of episodes to playback/record (if None, plays random episodes) + +Example: + $ python decoupled_wbc/control/main/teleop/playback_sync_sim_data.py --dataset output/robocasa_datasets/ + --use-actions --use-wbc-goals + + $ python decoupled_wbc/control/main/teleop/playback_sync_sim_data.py --dataset output/robocasa_datasets/ + --use-actions --use-teleop-cmd + + # Record video of the first 5 episodes using WBC goals + $ python decoupled_wbc/control/main/teleop/playback_sync_sim_data.py --dataset output/robocasa_datasets/ + --use-actions --use-wbc-goals --save-video --num-episodes 5 +""" + +import json +import os +from pathlib import Path +import time +from typing import Optional + +import cv2 +import numpy as np +import rclpy +from robosuite.environments.robot_env import RobotEnv +from tqdm import tqdm +import tyro + +from decoupled_wbc.control.main.teleop.configs.configs import SyncSimPlaybackConfig +from decoupled_wbc.control.robot_model.instantiation import get_robot_type_and_model +from decoupled_wbc.control.utils.sync_sim_utils import ( + generate_frame, + get_data_exporter, + get_env, + get_policies, +) +from decoupled_wbc.data.constants import RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH +from decoupled_wbc.data.exporter import TypedLeRobotDataset + +CONTROL_NODE_NAME = "playback_node" +GREEN_BOLD = "\033[1;32m" +RED_BOLD = "\033[1;31m" +RESET = "\033[0m" + + +def load_lerobot_dataset(root_path, max_episodes=None): + task_name = None + episodes = [] + start_index = 0 + with open(Path(root_path) / "meta/episodes.jsonl", "r") as f: + for line in f: + episode = json.loads(line) + episode["start_index"] = start_index + start_index += episode["length"] + assert ( + task_name is None or task_name == episode["tasks"][0] + ), "All episodes should have the same task name" + task_name = episode["tasks"][0] + episodes.append(episode) + + dataset = 
TypedLeRobotDataset( + repo_id="tmp/test", + root=root_path, + load_video=False, + ) + + script_config = dataset.meta.info["script_config"] + + assert len(dataset) == start_index, "Dataset length does not match expected length" + + # Limit episodes if specified + if max_episodes is not None: + episodes = episodes[:max_episodes] + print( + f"Loading only first {len(episodes)} episodes (limited by max_episodes={max_episodes})" + ) + + f = {} + seeds = [] + for ep in tqdm(range(len(episodes))): + seed = None + f[f"data/demo_{ep + 1}/states"] = [] + f[f"data/demo_{ep + 1}/actions"] = [] + f[f"data/demo_{ep + 1}/teleop_cmd"] = [] + f[f"data/demo_{ep + 1}/wbc_goal"] = [] + start_index = episodes[ep]["start_index"] + end_index = start_index + episodes[ep]["length"] + for i in tqdm(range(start_index, end_index)): + frame = dataset[i] + # load the seed + assert ( + seed is None or seed == np.array(frame["observation.sim.seed"]).item() + ), "All observations in an episode should have the same seed" + seed = np.array(frame["observation.sim.seed"]).item() + # load the state + mujoco_state_len = frame["observation.sim.mujoco_state_len"] + mujoco_state = frame["observation.sim.mujoco_state"] + f[f"data/demo_{ep + 1}/states"].append(np.array(mujoco_state[:mujoco_state_len])) + # load the action + action = frame["action"] + f[f"data/demo_{ep + 1}/actions"].append(np.array(action)) + + # load the teleop command + teleop_cmd = { + "left_wrist": np.array(frame["observation.sim.left_wrist"].reshape(4, 4)), + "right_wrist": np.array(frame["observation.sim.right_wrist"].reshape(4, 4)), + "left_fingers": { + "position": np.array(frame["observation.sim.left_fingers"].reshape(25, 4, 4)), + }, + "right_fingers": { + "position": np.array(frame["observation.sim.right_fingers"].reshape(25, 4, 4)), + }, + "target_upper_body_pose": np.array(frame["observation.sim.target_upper_body_pose"]), + "base_height_command": np.array(frame["teleop.base_height_command"]), + "navigate_cmd": 
np.array(frame["teleop.navigate_command"]), + } + f[f"data/demo_{ep + 1}/teleop_cmd"].append(teleop_cmd) + # load the WBC goal + wbc_goal = { + "wrist_pose": np.array(frame["action.eef"]), + "target_upper_body_pose": np.array(frame["observation.sim.target_upper_body_pose"]), + "navigate_cmd": np.array(frame["teleop.navigate_command"]), + "base_height_command": np.array(frame["teleop.base_height_command"]), + } + f[f"data/demo_{ep + 1}/wbc_goal"].append(wbc_goal) + + seeds.append(seed) + + return seeds, f, script_config + + +def validate_state(recorded_state, playback_state, ep, step, tolerance=1e-5): + """Validate that playback state matches recorded state within tolerance.""" + if not np.allclose(recorded_state, playback_state, atol=tolerance): + err = np.linalg.norm(recorded_state - playback_state) + print(f"[warning] state diverged by {err:.12f} for ep {ep} at step {step}") + return False + return True + + +def generate_and_save_frame( + config, sync_env, obs, wbc_action, seed, teleop_cmd, wbc_goal, gr00t_exporter +): + """Generate and save a frame to LeRobot dataset if enabled.""" + if config.save_lerobot: + max_mujoco_state_len, mujoco_state_len, mujoco_state = sync_env.get_mujoco_state_info() + frame = generate_frame( + obs, + wbc_action, + seed, + mujoco_state, + mujoco_state_len, + max_mujoco_state_len, + teleop_cmd, + wbc_goal, + config.save_img_obs, + ) + gr00t_exporter.add_frame(frame) + + +def playback_wbc_goals( + sync_env, + wbc_policy, + wbc_goals, + teleop_cmds, + states, + env, + onscreen, + config, + video_writer, + ep, + seed, + gr00t_exporter, + end_steps, +): + """Playback using WBC goals to control the robot.""" + ret = True + num_wbc_goals = len(wbc_goals) if end_steps == -1 else min(end_steps, len(wbc_goals)) + + for jj in range(num_wbc_goals): + wbc_goal = wbc_goals[jj] + obs = sync_env.observe() + wbc_policy.set_observation(obs) + wbc_policy.set_goal(wbc_goal) + wbc_action = wbc_policy.get_action() + sync_env.queue_action(wbc_action) + + # 
Save frame if needed + if config.save_lerobot: + teleop_cmd = teleop_cmds[jj] + generate_and_save_frame( + config, sync_env, obs, wbc_action, seed, teleop_cmd, wbc_goal, gr00t_exporter + ) + + capture_or_render_frame(env, onscreen, config, video_writer) + + if jj < len(states) - 1: + state_playback = env.sim.get_state().flatten() + if not validate_state(states[jj + 1], state_playback, ep, jj): + ret = False + + return ret + + +def playback_teleop_cmd( + sync_env, + wbc_policy, + teleop_policy, + wbc_goals, + teleop_cmds, + states, + env, + onscreen, + config, + video_writer, + ep, + seed, + gr00t_exporter, + end_steps, +): + """Playback using teleop commands to control the robot.""" + ret = True + num_steps = len(wbc_goals) if end_steps == -1 else min(end_steps, len(wbc_goals)) + + for jj in range(num_steps): + wbc_goal = wbc_goals[jj] + teleop_cmd = teleop_cmds[jj] + + # Set IK goal from teleop command + ik_data = { + "body_data": { + teleop_policy.retargeting_ik.body.supplemental_info.hand_frame_names[ + "left" + ]: teleop_cmd["left_wrist"], + teleop_policy.retargeting_ik.body.supplemental_info.hand_frame_names[ + "right" + ]: teleop_cmd["right_wrist"], + }, + "left_hand_data": teleop_cmd["left_fingers"], + "right_hand_data": teleop_cmd["right_fingers"], + } + teleop_policy.retargeting_ik.set_goal(ik_data) + + # Store original and get new upper body pose + target_upper_body_pose = wbc_goal["target_upper_body_pose"].copy() + wbc_goal["target_upper_body_pose"] = teleop_policy.retargeting_ik.get_action() + + # Execute WBC policy + obs = sync_env.observe() + wbc_policy.set_observation(obs) + wbc_policy.set_goal(wbc_goal) + wbc_action = wbc_policy.get_action() + sync_env.queue_action(wbc_action) + + # Save frame if needed + generate_and_save_frame( + config, sync_env, obs, wbc_action, seed, teleop_cmd, wbc_goal, gr00t_exporter + ) + + # Render or capture frame + capture_or_render_frame(env, onscreen, config, video_writer) + + # Validate states + if jj < len(states) - 
1: + if not np.allclose( + target_upper_body_pose, wbc_goal["target_upper_body_pose"], atol=1e-5 + ): + err = np.linalg.norm(target_upper_body_pose - wbc_goal["target_upper_body_pose"]) + print( + f"[warning] target_upper_body_pose diverged by {err:.12f} for ep {ep} at step {jj}" + ) + ret = False + + state_playback = env.sim.get_state().flatten() + if not validate_state(states[jj + 1], state_playback, ep, jj): + ret = False + + return ret + + +def playback_actions( + sync_env, + actions, + teleop_cmds, + wbc_goals, + states, + env, + onscreen, + config, + video_writer, + ep, + seed, + gr00t_exporter, + end_steps, +): + """Playback using actions directly.""" + ret = True + num_actions = len(actions) if end_steps == -1 else min(end_steps, len(actions)) + + for j in range(num_actions): + sync_env.queue_action({"q": actions[j]}) + + # Save frame if needed + if config.save_lerobot: + obs = sync_env.observe() + teleop_cmd = teleop_cmds[j] + wbc_goal = wbc_goals[j] + wbc_action = {"q": actions[j]} + generate_and_save_frame( + config, sync_env, obs, wbc_action, seed, teleop_cmd, wbc_goal, gr00t_exporter + ) + + capture_or_render_frame(env, onscreen, config, video_writer) + + if j < len(states) - 1: + state_playback = env.sim.get_state().flatten() + if not validate_state(states[j + 1], state_playback, ep, j): + ret = False + + return ret + + +def playback_states( + sync_env, + states, + actions, + teleop_cmds, + wbc_goals, + env, + onscreen, + config, + video_writer, + seed, + gr00t_exporter, + end_steps, + ep, +): + """Playback by forcing mujoco states directly.""" + ret = True + num_states = len(states) if end_steps == -1 else min(end_steps, len(states)) + + for i in range(num_states): + sync_env.reset_to({"states": states[i]}) + sync_env.render() + + # Validate that the state was set correctly + if i < len(states): + state_playback = env.sim.get_state().flatten() + if not validate_state(states[i], state_playback, ep, i): + ret = False + + # Save frame if needed + if 
config.save_lerobot: + obs = sync_env.observe() + teleop_cmd = teleop_cmds[i] + wbc_goal = wbc_goals[i] + wbc_action = {"q": actions[i]} + generate_and_save_frame( + config, sync_env, obs, wbc_action, seed, teleop_cmd, wbc_goal, gr00t_exporter + ) + + capture_or_render_frame(env, onscreen, config, video_writer) + + return ret + + +def main(config: SyncSimPlaybackConfig): + ret = True + start_time = time.time() + + np.set_printoptions(precision=5, suppress=True, linewidth=120) + + assert config.dataset is not None, "Folder must be specified for playback" + + seeds, f, script_config = load_lerobot_dataset(config.dataset) + + config.update( + script_config, + allowed_keys=[ + "wbc_version", + "wbc_model_path", + "wbc_policy_class", + "control_frequency", + "enable_waist", + "with_hands", + "env_name", + "robot", + "task_name", + "teleop_frequency", + "data_collection_frequency", + "enable_gravity_compensation", + "gravity_compensation_joints", + ], + ) + config.validate_args() + + robot_type, robot_model = get_robot_type_and_model(config.robot, config.enable_waist) + + # Setup rendering + if config.save_video or config.save_img_obs: + onscreen = False + offscreen = True + else: + onscreen = True + offscreen = False + + # Set default video path if not specified + if config.save_video and config.video_path is None: + if os.path.isfile(config.dataset): + video_folder = Path(config.dataset).parent + else: + video_folder = Path(config.dataset) + video_folder.mkdir(parents=True, exist_ok=True) + config.video_path = str(video_folder / "playback_video.mp4") + print(f"Video recording enabled. 
Output: {config.video_path}") + + sync_env = get_env(config, onscreen=onscreen, offscreen=offscreen) + + gr00t_exporter = None + if config.save_lerobot: + obs = sync_env.observe() + gr00t_exporter = get_data_exporter(config, obs, robot_model) + + # Initialize policies + wbc_policy, teleop_policy = get_policies( + config, robot_type, robot_model, activate_keyboard_listener=False + ) + + # List of all demonstrations episodes + demos = [f"demo_{i + 1}" for i in range(len(seeds))] + print(f"Loaded and will playback {len(demos)} episodes") + env = sync_env.base_env + + # Setup video writer + video_writer = None + fourcc = None + if config.save_video: + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + video_writer = cv2.VideoWriter( + config.video_path, fourcc, 20, (RS_VIEW_CAMERA_WIDTH, RS_VIEW_CAMERA_HEIGHT) + ) + + print("Loaded {} episodes from {}".format(len(demos), config.dataset)) + print("seeds:", seeds) + print("demos:", demos, "\n\n") + + # Handle episode selection - either limited number or infinite random + max_episodes = len(demos) + episode_count = 0 + while True: + if episode_count >= max_episodes: + break + ep = demos[episode_count] + print(f"Playing back episode: {ep}") + episode_count += 1 + + # read the model xml, using the metadata stored in the attribute for this episode + seed = seeds[int(ep.split("_")[-1]) - 1] + sync_env.reset(seed=seed) + + # load the actions and states + states = f["data/{}/states".format(ep)] + actions = f["data/{}/actions".format(ep)] + teleop_cmds = f["data/{}/teleop_cmd".format(ep)] + wbc_goals = f["data/{}/wbc_goal".format(ep)] + + # reset the policies + wbc_policy, teleop_policy, _ = get_policies( + config, robot_type, robot_model, activate_keyboard_listener=False + ) + end_steps = 20 if config.ci_test else -1 + + if config.use_actions: + # load the initial state + sync_env.reset_to({"states": states[0]}) + # load the actions and play them back open-loop + if config.use_wbc_goals: + # use the wbc_goals to control the robot + 
episode_ret = playback_wbc_goals( + sync_env, + wbc_policy, + wbc_goals, + teleop_cmds, + states, + env, + onscreen, + config, + video_writer, + ep, + seed, + gr00t_exporter, + end_steps, + ) + ret = ret and episode_ret + elif config.use_teleop_cmd: + # use the teleop commands to control the robot + episode_ret = playback_teleop_cmd( + sync_env, + wbc_policy, + teleop_policy, + wbc_goals, + teleop_cmds, + states, + env, + onscreen, + config, + video_writer, + ep, + seed, + gr00t_exporter, + end_steps, + ) + ret = ret and episode_ret + else: + episode_ret = playback_actions( + sync_env, + actions, + teleop_cmds, + wbc_goals, + states, + env, + onscreen, + config, + video_writer, + ep, + seed, + gr00t_exporter, + end_steps, + ) + ret = ret and episode_ret + else: + # force the sequence of internal mujoco states one by one + episode_ret = playback_states( + sync_env, + states, + actions, + teleop_cmds, + wbc_goals, + env, + onscreen, + config, + video_writer, + seed, + gr00t_exporter, + end_steps, + ep, + ) + ret = ret and episode_ret + + if config.save_lerobot: + gr00t_exporter.save_episode() + + print(f"Episode {ep} playback finished.\n\n") + + # close the env + sync_env.close() + + # Cleanup + if video_writer is not None: + video_writer.release() + print(f"Video saved to: {config.video_path}") + + end_time = time.time() + elapsed_time = end_time - start_time + + if config.save_lerobot: + print(f"LeRobot dataset saved to: {gr00t_exporter.root}") + + print( + f"{GREEN_BOLD}Playback with WBC version: {config.wbc_version}, {config.wbc_model_path}, " + f"{config.wbc_policy_class}, use_actions: {config.use_actions}, use_wbc_goals: {config.use_wbc_goals}, " + f"use_teleop_cmd: {config.use_teleop_cmd}{RESET}" + ) + if ret: + print(f"{GREEN_BOLD}Playback completed successfully in {elapsed_time:.2f} seconds!{RESET}") + else: + print(f"{RED_BOLD}Playback encountered an error in {elapsed_time:.2f} seconds!{RESET}") + + return ret + + +def capture_or_render_frame( + env: 
RobotEnv, + onscreen: bool, + config: SyncSimPlaybackConfig, + video_writer: Optional[cv2.VideoWriter], +): + """Capture frame for video recording if enabled, or render the environment.""" + if config.save_video: + if hasattr(env, "sim") and hasattr(env.sim, "render"): + img = env.sim.render( + width=RS_VIEW_CAMERA_WIDTH, + height=RS_VIEW_CAMERA_HEIGHT, + camera_name=env.render_camera[0], + ) + img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + img_bgr = np.flipud(img_bgr) + video_writer.write(img_bgr) + elif onscreen: + env.render() + + +if __name__ == "__main__": + config = tyro.cli(SyncSimPlaybackConfig) + + rclpy.init(args=None) + node = rclpy.create_node("playback_decoupled_wbc_control") + + main(config) + + rclpy.shutdown() diff --git a/decoupled_wbc/control/main/teleop/run_camera_viewer.py b/decoupled_wbc/control/main/teleop/run_camera_viewer.py new file mode 100644 index 0000000..15a6439 --- /dev/null +++ b/decoupled_wbc/control/main/teleop/run_camera_viewer.py @@ -0,0 +1,249 @@ +""" +Camera viewer with manual recording support. + +This script provides a camera viewer that can display multiple camera streams +and record them to video files with manual start/stop controls. + +Features: +- Onscreen mode: Display camera feeds with optional recording +- Offscreen mode: No display, recording only when triggered +- Manual recording control with keyboard (R key to start/stop) + +Usage Examples: + +1. Basic onscreen viewing (with recording capability): + python run_camera_viewer.py --camera-host localhost --camera-port 5555 + +2. Offscreen mode (no display, recording only): + python run_camera_viewer.py --offscreen --camera-host localhost --camera-port 5555 + +3. 
Custom output directory: + python run_camera_viewer.py --output-path ./my_recordings --camera-host localhost + +Controls: +- R key: Start/Stop recording +- Q key: Quit application + +Output Structure: +camera_output_20241211_143052/ +├── rec_143205/ +│ ├── ego_view_color_image.mp4 +│ ├── head_left_color_image.mp4 +│ └── head_right_color_image.mp4 +└── rec_143410/ + ├── ego_view_color_image.mp4 + └── head_left_color_image.mp4 +""" + +from dataclasses import dataclass +from pathlib import Path +import threading +import time +from typing import Any, Optional + +import cv2 +import rclpy +from sshkeyboard import listen_keyboard, stop_listening +import tyro + +from decoupled_wbc.control.main.teleop.configs.configs import ComposedCameraClientConfig +from decoupled_wbc.control.sensor.composed_camera import ComposedCameraClientSensor +from decoupled_wbc.control.utils.img_viewer import ImageViewer + + +@dataclass +class CameraViewerConfig(ComposedCameraClientConfig): + """Config for running the camera viewer with recording support.""" + + offscreen: bool = False + """Run in offscreen mode (no display, manual recording with R key).""" + + output_path: Optional[str] = None + """Output path for saving videos. If None, auto-generates path.""" + + codec: str = "mp4v" + """Video codec to use for saving (e.g., 'mp4v', 'XVID').""" + + +ArgsConfig = CameraViewerConfig + + +def _get_camera_titles(image_data: dict[str, Any]) -> list[str]: + """ + Detect all the individual camera streams from the image data. 
+ + schema format: + { + "timestamps": {"ego_view": 123.45, "ego_view_left_mono": 123.46}, + "images": {"ego_view": np.ndarray, "ego_view_left_mono": np.ndarray} + } + + Returns list of camera keys (e.g., ["ego_view", "ego_view_left_mono", "ego_view_right_mono"]) + """ + # Extract all camera keys from the images dictionary + camera_titles = list(image_data.get("images", {}).keys()) + return camera_titles + + +def main(config: ArgsConfig): + """Main function to run the camera viewer.""" + # Initialize ROS + rclpy.init(args=None) + node = rclpy.create_node("camera_viewer") + + # Start ROS spin in a separate thread + thread = threading.Thread(target=rclpy.spin, args=(node,), daemon=True) + thread.start() + + image_sub = ComposedCameraClientSensor(server_ip=config.camera_host, port=config.camera_port) + + # pre-fetch a sample image to get the number of camera angles + retry_count = 0 + while True: + _sample_image = image_sub.read() + if _sample_image: + break + retry_count += 1 + time.sleep(0.1) + if retry_count > 10: + raise Exception("Failed to get sample image") + + camera_titles = _get_camera_titles(_sample_image) + + # Setup output directory + if config.output_path is None: + output_dir = Path("camera_recordings") + else: + output_dir = Path(config.output_path) + + # Recording state + is_recording = False + video_writers = {} + frame_count = 0 + recording_start_time = None + should_quit = False + + def on_press(key): + nonlocal is_recording, video_writers, frame_count, recording_start_time, should_quit + + if key == "r": + if not is_recording: + # Start recording + recording_dir = output_dir / f"rec_{time.strftime('%Y%m%d_%H%M%S')}" + recording_dir.mkdir(parents=True, exist_ok=True) + + # Create video writers + fourcc = cv2.VideoWriter_fourcc(*config.codec) + video_writers = {} + + for title in camera_titles: + img = _sample_image["images"].get(title) + if img is not None: + height, width = img.shape[:2] + video_path = recording_dir / f"{title}.mp4" + writer = 
cv2.VideoWriter( + str(video_path), fourcc, config.fps, (width, height) + ) + video_writers[title] = writer + + is_recording = True + recording_start_time = time.time() + frame_count = 0 + print(f"🔴 Recording started: {recording_dir}") + else: + # Stop recording + is_recording = False + for title, writer in video_writers.items(): + writer.release() + video_writers = {} + + duration = time.time() - recording_start_time if recording_start_time else 0 + print(f"⏹️ Recording stopped - {duration:.1f}s, {frame_count} frames") + elif key == "q": + should_quit = True + stop_listening() + + # Setup keyboard listener in a separate thread + keyboard_thread = threading.Thread( + target=lambda: listen_keyboard(on_press=on_press), daemon=True + ) + keyboard_thread.start() + + # Setup viewer for onscreen mode + viewer = None + if not config.offscreen: + viewer = ImageViewer( + title="Camera Viewer", + figsize=(10, 8), + num_images=len(camera_titles), + image_titles=camera_titles, + ) + + # Print instructions + mode = "Offscreen" if config.offscreen else "Onscreen" + print(f"{mode} mode - Target FPS: {config.fps}") + print(f"Videos will be saved to: {output_dir}") + print("Controls: R key to start/stop recording, Q key to quit, Ctrl+C to exit") + + # Create ROS rate controller + rate = node.create_rate(config.fps) + + try: + while rclpy.ok() and not should_quit: + # Get images from all subscribers + images = [] + image_data = image_sub.read() + if image_data: + for title in camera_titles: + img = image_data["images"].get(title) + images.append(img) + + # Save frame if recording + if is_recording and img is not None and title in video_writers: + # Convert from RGB to BGR for OpenCV + if len(img.shape) == 3 and img.shape[2] == 3: + img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + else: + img_bgr = img + video_writers[title].write(img_bgr) + + # Display images if not offscreen + if not config.offscreen and viewer and any(img is not None for img in images): + status = "🔴 REC" if 
is_recording else "⏸️ Ready" + viewer._fig.suptitle(f"Camera Viewer - {status}") + viewer.show_multiple(images) + + # Progress feedback + if is_recording: + frame_count += 1 + if frame_count % 100 == 0: + duration = time.time() - recording_start_time + print(f"Recording: {frame_count} frames ({duration:.1f}s)") + + rate.sleep() + + except KeyboardInterrupt: + print("\nExiting...") + finally: + # Cleanup + try: + stop_listening() + except Exception: + pass + + if video_writers: + for title, writer in video_writers.items(): + writer.release() + if is_recording: + duration = time.time() - recording_start_time + print(f"Final: {duration:.1f}s, {frame_count} frames") + + if viewer: + viewer.close() + + rclpy.shutdown() + + +if __name__ == "__main__": + config = tyro.cli(ArgsConfig) + main(config) diff --git a/decoupled_wbc/control/main/teleop/run_g1_control_loop.py b/decoupled_wbc/control/main/teleop/run_g1_control_loop.py new file mode 100644 index 0000000..2cec57f --- /dev/null +++ b/decoupled_wbc/control/main/teleop/run_g1_control_loop.py @@ -0,0 +1,236 @@ +from copy import deepcopy +import time + +import tyro + +from decoupled_wbc.control.envs.g1.g1_env import G1Env +from decoupled_wbc.control.main.constants import ( + CONTROL_GOAL_TOPIC, + DEFAULT_BASE_HEIGHT, + DEFAULT_NAV_CMD, + DEFAULT_WRIST_POSE, + JOINT_SAFETY_STATUS_TOPIC, + LOWER_BODY_POLICY_STATUS_TOPIC, + ROBOT_CONFIG_TOPIC, + STATE_TOPIC_NAME, +) +from decoupled_wbc.control.main.teleop.configs.configs import ControlLoopConfig +from decoupled_wbc.control.policy.wbc_policy_factory import get_wbc_policy +from decoupled_wbc.control.robot_model.instantiation.g1 import ( + instantiate_g1_robot_model, +) +from decoupled_wbc.control.utils.keyboard_dispatcher import ( + KeyboardDispatcher, + KeyboardEStop, + KeyboardListenerPublisher, + ROSKeyboardDispatcher, +) +from decoupled_wbc.control.utils.ros_utils import ( + ROSManager, + ROSMsgPublisher, + ROSMsgSubscriber, + ROSServiceServer, +) +from 
decoupled_wbc.control.utils.telemetry import Telemetry + +CONTROL_NODE_NAME = "ControlPolicy" + + +def main(config: ControlLoopConfig): + ros_manager = ROSManager(node_name=CONTROL_NODE_NAME) + node = ros_manager.node + + # start the robot config server + ROSServiceServer(ROBOT_CONFIG_TOPIC, config.to_dict()) + + wbc_config = config.load_wbc_yaml() + + data_exp_pub = ROSMsgPublisher(STATE_TOPIC_NAME) + lower_body_policy_status_pub = ROSMsgPublisher(LOWER_BODY_POLICY_STATUS_TOPIC) + joint_safety_status_pub = ROSMsgPublisher(JOINT_SAFETY_STATUS_TOPIC) + + # Initialize telemetry + telemetry = Telemetry(window_size=100) + + waist_location = "lower_and_upper_body" if config.enable_waist else "lower_body" + robot_model = instantiate_g1_robot_model( + waist_location=waist_location, high_elbow_pose=config.high_elbow_pose + ) + + env = G1Env( + env_name=config.env_name, + robot_model=robot_model, + config=wbc_config, + wbc_version=config.wbc_version, + ) + if env.sim and not config.sim_sync_mode: + env.start_simulator() + + wbc_policy = get_wbc_policy("g1", robot_model, wbc_config, config.upper_body_joint_speed) + + keyboard_listener_pub = KeyboardListenerPublisher() + keyboard_estop = KeyboardEStop() + if config.keyboard_dispatcher_type == "raw": + dispatcher = KeyboardDispatcher() + elif config.keyboard_dispatcher_type == "ros": + dispatcher = ROSKeyboardDispatcher() + else: + raise ValueError( + f"Invalid keyboard dispatcher: {config.keyboard_dispatcher_type}, please use 'raw' or 'ros'" + ) + dispatcher.register(env) + dispatcher.register(wbc_policy) + dispatcher.register(keyboard_listener_pub) + dispatcher.register(keyboard_estop) + dispatcher.start() + + rate = node.create_rate(config.control_frequency) + + upper_body_policy_subscriber = ROSMsgSubscriber(CONTROL_GOAL_TOPIC) + + last_teleop_cmd = None + try: + while ros_manager.ok(): + t_start = time.monotonic() + with telemetry.timer("total_loop"): + # Step simulator if in sync mode + with 
telemetry.timer("step_simulator"): + if env.sim and config.sim_sync_mode: + env.step_simulator() + + # Measure observation time + with telemetry.timer("observe"): + obs = env.observe() + wbc_policy.set_observation(obs) + + # Measure policy setup time + with telemetry.timer("policy_setup"): + upper_body_cmd = upper_body_policy_subscriber.get_msg() + + t_now = time.monotonic() + + wbc_goal = {} + if upper_body_cmd: + wbc_goal = upper_body_cmd.copy() + last_teleop_cmd = upper_body_cmd.copy() + if config.ik_indicator: + env.set_ik_indicator(upper_body_cmd) + # Send goal to policy + if wbc_goal: + wbc_goal["interpolation_garbage_collection_time"] = t_now - 2 * ( + 1 / config.control_frequency + ) + wbc_policy.set_goal(wbc_goal) + + # Measure policy action calculation time + with telemetry.timer("policy_action"): + wbc_action = wbc_policy.get_action(time=t_now) + + # Measure action queue time + with telemetry.timer("queue_action"): + env.queue_action(wbc_action) + + # Publish status information for InteractiveModeController + with telemetry.timer("publish_status"): + # Get policy status - check if the lower body policy has use_policy_action enabled + policy_use_action = False + try: + # Access the lower body policy through the decoupled whole body policy + if hasattr(wbc_policy, "lower_body_policy"): + policy_use_action = getattr( + wbc_policy.lower_body_policy, "use_policy_action", False + ) + except (AttributeError, TypeError): + policy_use_action = False + + policy_status_msg = {"use_policy_action": policy_use_action, "timestamp": t_now} + lower_body_policy_status_pub.publish(policy_status_msg) + + # Get joint safety status from G1Env (which already runs the safety monitor) + joint_safety_ok = env.get_joint_safety_status() + + joint_safety_status_msg = { + "joint_safety_ok": joint_safety_ok, + "timestamp": t_now, + } + joint_safety_status_pub.publish(joint_safety_status_msg) + + # Start or Stop data collection + if wbc_goal.get("toggle_data_collection", False): + 
dispatcher.handle_key("c") + + # Abort the current episode + if wbc_goal.get("toggle_data_abort", False): + dispatcher.handle_key("x") + + if env.use_sim and wbc_goal.get("reset_env_and_policy", False): + print("Resetting sim environment and policy") + # Reset teleop policy & sim env + dispatcher.handle_key("k") + + # Clear upper body commands + upper_body_policy_subscriber._msg = None + upper_body_cmd = { + "target_upper_body_pose": obs["q"][ + robot_model.get_joint_group_indices("upper_body") + ], + "wrist_pose": DEFAULT_WRIST_POSE, + "base_height_command": DEFAULT_BASE_HEIGHT, + "navigate_cmd": DEFAULT_NAV_CMD, + } + last_teleop_cmd = upper_body_cmd.copy() + + time.sleep(0.5) + + msg = deepcopy(obs) + for key in obs.keys(): + if key.endswith("_image"): + del msg[key] + + # exporting data + if last_teleop_cmd: + msg.update( + { + "action": wbc_action["q"], + "action.eef": last_teleop_cmd.get("wrist_pose", DEFAULT_WRIST_POSE), + "base_height_command": last_teleop_cmd.get( + "base_height_command", DEFAULT_BASE_HEIGHT + ), + "navigate_command": last_teleop_cmd.get( + "navigate_cmd", DEFAULT_NAV_CMD + ), + "timestamps": { + "main_loop": time.time(), + "proprio": time.time(), + }, + } + ) + data_exp_pub.publish(msg) + end_time = time.monotonic() + + if env.sim and (not env.sim.sim_thread or not env.sim.sim_thread.is_alive()): + raise RuntimeError("Simulator thread is not alive") + + rate.sleep() + + # Log timing information every 100 iterations (roughly every 2 seconds at 50Hz) + if config.verbose_timing: + # When verbose timing is enabled, always show timing + telemetry.log_timing_info(context="G1 Control Loop", threshold=0.0) + elif (end_time - t_start) > (1 / config.control_frequency) and not config.sim_sync_mode: + # Only show timing when loop is slow and verbose_timing is disabled + telemetry.log_timing_info(context="G1 Control Loop Missed", threshold=0.001) + + except ros_manager.exceptions() as e: + print(f"ROSManager interrupted by user: {e}") + finally: + 
print("Cleaning up...") + # the order of the following is important + dispatcher.stop() + ros_manager.shutdown() + env.close() + + +if __name__ == "__main__": + config = tyro.cli(ControlLoopConfig) + main(config) diff --git a/decoupled_wbc/control/main/teleop/run_g1_data_exporter.py b/decoupled_wbc/control/main/teleop/run_g1_data_exporter.py new file mode 100644 index 0000000..aae1fe0 --- /dev/null +++ b/decoupled_wbc/control/main/teleop/run_g1_data_exporter.py @@ -0,0 +1,364 @@ +from collections import deque +from datetime import datetime +import threading +import time + +import numpy as np +import rclpy +import tyro + +from decoupled_wbc.control.main.constants import ROBOT_CONFIG_TOPIC, STATE_TOPIC_NAME +from decoupled_wbc.control.main.teleop.configs.configs import DataExporterConfig +from decoupled_wbc.control.robot_model.instantiation import g1 +from decoupled_wbc.control.sensor.composed_camera import ComposedCameraClientSensor +from decoupled_wbc.control.utils.episode_state import EpisodeState +from decoupled_wbc.control.utils.keyboard_dispatcher import KeyboardListenerSubscriber +from decoupled_wbc.control.utils.ros_utils import ROSMsgSubscriber, ROSServiceClient +from decoupled_wbc.control.utils.telemetry import Telemetry +from decoupled_wbc.control.utils.text_to_speech import TextToSpeech +from decoupled_wbc.data.constants import BUCKET_BASE_PATH +from decoupled_wbc.data.exporter import DataCollectionInfo, Gr00tDataExporter +from decoupled_wbc.data.utils import get_dataset_features, get_modality_config + + +class TimeDeltaException(Exception): + def __init__(self, failure_count: int, reset_timeout_sec: float): + """ + Exception raised when the time delta between two messages exceeds + a threshold for a consecutive number of times + """ + self.failure_count = failure_count + self.reset_timeout_sec = reset_timeout_sec + self.message = f"{self.failure_count} failures in {self.reset_timeout_sec} seconds" + super().__init__(self.message) + + +class 
TimingThresholdMonitor: + def __init__(self, max_failures=3, reset_timeout_sec=5, time_delta=0.2, raise_exception=False): + """ + Monitor the time diff (between two messages) and optionally raise an exception + if there is a consistent violations + """ + self.max_failures = max_failures + self.reset_timeout_sec = reset_timeout_sec + self.failure_count = 0 + self.last_failure_time = 0 + self.time_delta = time_delta + self.raise_exception = raise_exception + + def reset(self): + self.failure_count = 0 + self.last_failure_time = 0 + + def log_time_delta(self, time_delta_sec: float): + time_delta = abs(time_delta_sec) + if time_delta > self.time_delta: + self.failure_count += 1 + self.last_failure_time = time.monotonic() + + if self.is_threshold_exceeded(): + print( + f"Time delta exception: {self.failure_count} failures in {self.reset_timeout_sec} seconds" + f", time delta: {time_delta}" + ) + if self.raise_exception: + raise TimeDeltaException(self.failure_count, self.reset_timeout_sec) + + def is_threshold_exceeded(self): + if self.failure_count >= self.max_failures: + return True + if time.monotonic() - self.last_failure_time > self.reset_timeout_sec: + self.reset() + return False + + +class Gr00tDataCollector: + def __init__( + self, + node, + camera_host: str, + camera_port: int, + state_topic_name: str, + data_exporter: Gr00tDataExporter, + text_to_speech=None, + frequency=20, + state_act_msg_frequency=50, + ): + + self.text_to_speech = text_to_speech + self.frequency = frequency + self.data_exporter = data_exporter + + self.node = node + + thread = threading.Thread(target=rclpy.spin, args=(self.node,), daemon=True) + thread.start() + time.sleep(0.5) + + self._episode_state = EpisodeState() + self._keyboard_listener = KeyboardListenerSubscriber() + self._state_subscriber = ROSMsgSubscriber(state_topic_name) + self._image_subscriber = ComposedCameraClientSensor(server_ip=camera_host, port=camera_port) + self.rate = self.node.create_rate(self.frequency) + + 
self.obs_act_buffer = deque(maxlen=100) + self.latest_image_msg = None + self.latest_proprio_msg = None + + self.state_polling_rate = 1 / state_act_msg_frequency + self.last_state_poll_time = time.monotonic() + + self.telemetry = Telemetry(window_size=100) + self.timing_threshold_monitor = TimingThresholdMonitor() + + print(f"Recording to {self.data_exporter.meta.root}") + + @property + def current_episode_index(self): + return self.data_exporter.episode_buffer["episode_index"] + + def _print_and_say(self, message: str, say: bool = True): + """Helper to use TextToSpeech print_and_say or fallback to print.""" + if self.text_to_speech is not None: + self.text_to_speech.print_and_say(message, say) + else: + print(message) + + def _check_keyboard_input(self): + key = self._keyboard_listener.read_msg() + if key == "c": + self._episode_state.change_state() + if self._episode_state.get_state() == self._episode_state.RECORDING: + self._print_and_say(f"Started recording {self.current_episode_index}") + elif self._episode_state.get_state() == self._episode_state.NEED_TO_SAVE: + self._print_and_say("Stopping recording, preparing to save") + elif self._episode_state.get_state() == self._episode_state.IDLE: + self._print_and_say("Saved episode and back to idle state") + elif key == "x": + if self._episode_state.get_state() == self._episode_state.RECORDING: + self.data_exporter.save_episode_as_discarded() + self._episode_state.reset_state() + self._print_and_say("Discarded episode") + + def _add_data_frame(self): + t_start = time.monotonic() + + if self.latest_proprio_msg is None or self.latest_image_msg is None: + self._print_and_say( + f"Waiting for message. 
" + f"Avail msg: proprio {self.latest_proprio_msg is not None} | " + f"image {self.latest_image_msg is not None}", + say=False, + ) + return False + + if self._episode_state.get_state() == self._episode_state.RECORDING: + + # Calculate max time delta between images and proprio + max_time_delta = 0 + for _, image_time in self.latest_image_msg["timestamps"].items(): + time_delta = abs(image_time - self.latest_proprio_msg["timestamps"]["proprio"]) + max_time_delta = max(max_time_delta, time_delta) + + self.timing_threshold_monitor.log_time_delta(max_time_delta) + if (self.timing_threshold_monitor.failure_count + 1) % 100 == 0: + self._print_and_say("Image state delta too high, please discard data") + + frame_data = { + "observation.state": self.latest_proprio_msg["q"], + "observation.eef_state": self.latest_proprio_msg["wrist_pose"], + "action": self.latest_proprio_msg["action"], + "action.eef": self.latest_proprio_msg["action.eef"], + "observation.img_state_delta": ( + np.array( + [max_time_delta], + dtype=np.float32, + ) + ), # lerobot only supports adding numpy arrays + "teleop.navigate_command": np.array( + self.latest_proprio_msg["navigate_command"], dtype=np.float64 + ), + "teleop.base_height_command": np.array( + [self.latest_proprio_msg["base_height_command"]], dtype=np.float64 + ), + } + + # Add images based on dataset features + images = self.latest_image_msg["images"] + for feature_name, feature_info in self.data_exporter.features.items(): + if feature_info.get("dtype") in ["image", "video"]: + # Extract image key from feature name (e.g., "observation.images.ego_view" -> "ego_view") + image_key = feature_name.split(".")[-1] + + if image_key not in images: + raise ValueError( + f"Required image '{image_key}' for feature '{feature_name}' " + f"not found in image message. 
Available images: {list(images.keys())}" + ) + frame_data[feature_name] = images[image_key] + + self.data_exporter.add_frame(frame_data) + + t_end = time.monotonic() + if t_end - t_start > (1 / self.frequency): + print(f"DataExporter Missed: {t_end - t_start} sec") + + if self._episode_state.get_state() == self._episode_state.NEED_TO_SAVE: + self.data_exporter.save_episode() + self.timing_threshold_monitor.reset() + self._print_and_say("Finished saving episode") + self._episode_state.change_state() + + return True + + def save_and_cleanup(self): + try: + self._print_and_say("saving episode done") + # save on going episode if any + buffer_size = self.data_exporter.episode_buffer.get("size", 0) + if buffer_size > 0: + self.data_exporter.save_episode() + self._print_and_say(f"Recording complete: {self.data_exporter.meta.root}", say=False) + except Exception as e: + self._print_and_say(f"Error saving episode: {e}") + + self.node.destroy_node() + rclpy.shutdown() + self._print_and_say("Shutting down data exporter...", say=False) + + def run(self): + try: + while rclpy.ok(): + t_start = time.monotonic() + with self.telemetry.timer("total_loop"): + # 1. poll proprio msg + with self.telemetry.timer("poll_state"): + msg = self._state_subscriber.get_msg() + if msg is not None: + self.latest_proprio_msg = msg + + # 2. poll image msg + with self.telemetry.timer("poll_image"): + msg = self._image_subscriber.read() + if msg is not None: + self.latest_image_msg = msg + + # 3. check keyboard input + with self.telemetry.timer("check_keyboard"): + self._check_keyboard_input() + + # 4. 
add frame + with self.telemetry.timer("add_frame"): + self._add_data_frame() + + end_time = time.monotonic() + + self.rate.sleep() + + # Log timing information if we missed our target frequency + if (end_time - t_start) > (1 / self.frequency): + self.telemetry.log_timing_info( + context="Data Exporter Loop Missed", threshold=0.001 + ) + + except KeyboardInterrupt: + print("Data exporter terminated by user") + # The user will trigger a keyboard interrupt if there's something wrong, + # so we flag the ongoing episode as discarded + buffer_size = self.data_exporter.episode_buffer.get("size", 0) + if buffer_size > 0: + self.data_exporter.save_episode_as_discarded() + + finally: + self.save_and_cleanup() + + +def main(config: DataExporterConfig): + + rclpy.init(args=None) + node = rclpy.create_node("data_exporter") + + waist_location = "lower_and_upper_body" if config.enable_waist else "lower_body" + g1_rm = g1.instantiate_g1_robot_model( + waist_location=waist_location, high_elbow_pose=config.high_elbow_pose + ) + + dataset_features = get_dataset_features(g1_rm, config.add_stereo_camera) + modality_config = get_modality_config(g1_rm, config.add_stereo_camera) + + text_to_speech = TextToSpeech() if config.text_to_speech else None + + # Only set DataCollectionInfo if we're creating a new dataset + # When adding to existing dataset, DataCollectionInfo will be ignored + if config.robot_id is not None: + data_collection_info = DataCollectionInfo( + teleoperator_username=config.teleoperator_username, + support_operator_username=config.support_operator_username, + robot_type="g1", + robot_id=config.robot_id, + lower_body_policy=config.lower_body_policy, + wbc_model_path=config.wbc_model_path, + ) + else: + # Use default DataCollectionInfo when adding to existing dataset + # This will be ignored if the dataset already exists + data_collection_info = DataCollectionInfo() + + robot_config_client = ROSServiceClient(ROBOT_CONFIG_TOPIC) + robot_config = 
robot_config_client.get_config() + + data_exporter = Gr00tDataExporter.create( + save_root=f"{config.root_output_dir}/{config.dataset_name}", + fps=config.data_collection_frequency, + features=dataset_features, + modality_config=modality_config, + task=config.task_prompt, + upload_bucket_path=BUCKET_BASE_PATH, + data_collection_info=data_collection_info, + script_config=robot_config, + ) + + data_collector = Gr00tDataCollector( + node=node, + frequency=config.data_collection_frequency, + data_exporter=data_exporter, + state_topic_name=STATE_TOPIC_NAME, + camera_host=config.camera_host, + camera_port=config.camera_port, + text_to_speech=text_to_speech, + ) + data_collector.run() + + +if __name__ == "__main__": + config = tyro.cli(DataExporterConfig) + config.task_prompt = input("Enter the task prompt: ").strip().lower() + add_to_existing_dataset = input("Add to existing dataset? (y/n): ").strip().lower() + + if add_to_existing_dataset == "y": + config.dataset_name = input("Enter the dataset name: ").strip().lower() + # When adding to existing dataset, we don't need robot_id or operator usernames + # as they should already be set in the existing dataset + elif add_to_existing_dataset == "n": + # robot_id = input("Enter the robot ID: ").strip().lower() + # if robot_id not in G1_ROBOT_IDS: + # raise ValueError(f"Invalid robot ID: {robot_id}. 
Available robot IDs: {G1_ROBOT_IDS}") + config.robot_id = "sim" + config.dataset_name = f"{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}-G1-{config.robot_id}" + + # Only ask for operator usernames when creating a new dataset + # print("Available teleoperator usernames:") + # for i, username in enumerate(OPERATOR_USERNAMES): + # print(f"{i}: {username}") + # teleop_idx = int(input("Select teleoperator username index: ")) + # config.teleoperator_username = OPERATOR_USERNAMES[teleop_idx] + config.teleoperator_username = "NEW_USER" + + # print("\nAvailable support operator usernames:") + # for i, username in enumerate(OPERATOR_USERNAMES): + # print(f"{i}: {username}") + # support_idx = int(input("Select support operator username index: ")) + # config.support_operator_username = OPERATOR_USERNAMES[support_idx] + config.support_operator_username = "NEW_USER" + + main(config) diff --git a/decoupled_wbc/control/main/teleop/run_navigation_policy_loop.py b/decoupled_wbc/control/main/teleop/run_navigation_policy_loop.py new file mode 100644 index 0000000..54dfabd --- /dev/null +++ b/decoupled_wbc/control/main/teleop/run_navigation_policy_loop.py @@ -0,0 +1,68 @@ +import threading +import time + +import rclpy + +from decoupled_wbc.control.main.constants import NAV_CMD_TOPIC +from decoupled_wbc.control.policy.keyboard_navigation_policy import KeyboardNavigationPolicy +from decoupled_wbc.control.utils.keyboard_dispatcher import KeyboardListenerSubscriber +from decoupled_wbc.control.utils.ros_utils import ROSMsgPublisher + +FREQUENCY = 10 +NAV_NODE_NAME = "NavigationPolicy" + + +def main(): + rclpy.init(args=None) + node = rclpy.create_node(NAV_NODE_NAME) + + # Start ROS spin in a separate thread + thread = threading.Thread(target=rclpy.spin, args=(node,), daemon=True) + thread.start() + time.sleep(0.5) + + dict_publisher = ROSMsgPublisher(NAV_CMD_TOPIC) + keyboard_listener = KeyboardListenerSubscriber() + + # Initialize navigation policy + navigation_policy = 
KeyboardNavigationPolicy() + + # Create rate controller + rate = node.create_rate(FREQUENCY) + + try: + while rclpy.ok(): + t_now = time.monotonic() + # get keyboard input + + navigation_policy.handle_keyboard_button(keyboard_listener.read_msg()) + # Get action from navigation policy + action = navigation_policy.get_action(time=t_now) + + # Add timestamp to the data + action["timestamp"] = t_now + + # Create and publish ByteMultiArray message + dict_publisher.publish(action) + + # Print status periodically (optional) + if int(t_now * 10) % 10 == 0: + nav_cmd = action["navigate_cmd"] + node.get_logger().info( + f"Nav cmd: linear=({nav_cmd[0]:.2f}, {nav_cmd[1]:.2f}), " + f"angular={nav_cmd[2]:.2f}" + ) + + rate.sleep() + + except KeyboardInterrupt: + print("Navigation control loop terminated by user") + + finally: + # Clean shutdown + node.destroy_node() + rclpy.shutdown() + + +if __name__ == "__main__": + main() diff --git a/decoupled_wbc/control/main/teleop/run_sim_loop.py b/decoupled_wbc/control/main/teleop/run_sim_loop.py new file mode 100644 index 0000000..598aa0c --- /dev/null +++ b/decoupled_wbc/control/main/teleop/run_sim_loop.py @@ -0,0 +1,61 @@ +from typing import Dict + +import tyro + +from decoupled_wbc.control.envs.g1.sim.simulator_factory import SimulatorFactory, init_channel +from decoupled_wbc.control.main.teleop.configs.configs import SimLoopConfig +from decoupled_wbc.control.robot_model.instantiation.g1 import ( + instantiate_g1_robot_model, +) +from decoupled_wbc.control.robot_model.robot_model import RobotModel + +ArgsConfig = SimLoopConfig + + +class SimWrapper: + def __init__(self, robot_model: RobotModel, env_name: str, config: Dict[str, any], **kwargs): + self.robot_model = robot_model + self.config = config + + init_channel(config=self.config) + + # Create simulator using factory + self.sim = SimulatorFactory.create_simulator( + config=self.config, + env_name=env_name, + **kwargs, + ) + + +def main(config: ArgsConfig): + wbc_config = 
config.load_wbc_yaml() + # NOTE: we will override the interface to local if it is not specified + wbc_config["ENV_NAME"] = config.env_name + + if config.enable_image_publish: + assert ( + config.enable_offscreen + ), "enable_offscreen must be True when enable_image_publish is True" + + robot_model = instantiate_g1_robot_model() + + sim_wrapper = SimWrapper( + robot_model=robot_model, + env_name=config.env_name, + config=wbc_config, + onscreen=wbc_config.get("ENABLE_ONSCREEN", True), + offscreen=wbc_config.get("ENABLE_OFFSCREEN", False), + ) + # Start simulator as independent process + SimulatorFactory.start_simulator( + sim_wrapper.sim, + as_thread=False, + enable_image_publish=config.enable_image_publish, + mp_start_method=config.mp_start_method, + camera_port=config.camera_port, + ) + + +if __name__ == "__main__": + config = tyro.cli(ArgsConfig) + main(config) diff --git a/decoupled_wbc/control/main/teleop/run_sync_sim_data_collection.py b/decoupled_wbc/control/main/teleop/run_sync_sim_data_collection.py new file mode 100644 index 0000000..3516725 --- /dev/null +++ b/decoupled_wbc/control/main/teleop/run_sync_sim_data_collection.py @@ -0,0 +1,213 @@ +from pathlib import Path +import time + +import tyro + +from decoupled_wbc.control.main.teleop.configs.configs import SyncSimDataCollectionConfig +from decoupled_wbc.control.robot_model.instantiation import get_robot_type_and_model +from decoupled_wbc.control.utils.keyboard_dispatcher import ( + KeyboardDispatcher, + KeyboardListener, +) +from decoupled_wbc.control.utils.ros_utils import ROSManager +from decoupled_wbc.control.utils.sync_sim_utils import ( + COLLECTION_KEY, + SKIP_KEY, + CITestManager, + EpisodeManager, + generate_frame, + get_data_exporter, + get_env, + get_policies, +) +from decoupled_wbc.control.utils.telemetry import Telemetry + +CONTROL_NODE_NAME = "ControlPolicy" + + +CONTROL_CMD_TOPIC = CONTROL_NODE_NAME + "/q_target" +ENV_NODE_NAME = "SyncEnv" +ENV_OBS_TOPIC = ENV_NODE_NAME + "/obs" + + +def 
display_controls(config: SyncSimDataCollectionConfig): + """ + Method to pretty print controls. + """ + + def print_command(char, info): + char += " " * (30 - len(char)) + print("{}\t{}".format(char, info)) + + print("") + print_command("Keys", "Command") + if config.manual_control: + print_command(COLLECTION_KEY, "start/stop data collection") + print_command(SKIP_KEY, "skip and collect new episodes") + print_command("w-s-a-d", "move horizontally in x-y plane (press '=' first to enable)") + print_command("q", "rotate (counter-clockwise)") + print_command("e", "rotate (clockwise)") + print_command("space", "reset all velocity to zero") + print("") + + +def main(config: SyncSimDataCollectionConfig): + ros_manager = ROSManager(node_name=CONTROL_NODE_NAME) + node = ros_manager.node + + # Initialize telemetry + telemetry = Telemetry(window_size=100) + + # Initialize robot model + robot_type, robot_model = get_robot_type_and_model( + config.robot, + enable_waist_ik=config.enable_waist, + ) + + # Initialize sim env + env = get_env(config, onscreen=config.enable_onscreen, offscreen=config.save_img_obs) + seed = int(time.time()) + env.reset(seed) + env.render() + obs = env.observe() + robot_model.set_initial_body_pose(obs["q"]) + + # Initialize data exporter + exporter = get_data_exporter( + config, + obs, + robot_model, + save_path=Path("./outputs/ci_test/") if config.ci_test else None, + ) + + # Display control signals + display_controls(config) + + # Initialize policies + wbc_policy, teleop_policy = get_policies(config, robot_type, robot_model) + + dispatcher = KeyboardDispatcher() + keyboard_listener = KeyboardListener() # for data collection keys + dispatcher.register(keyboard_listener) + dispatcher.register(wbc_policy) + dispatcher.register(teleop_policy) + + dispatcher.start() + + rate = node.create_rate(config.control_frequency) + + # Initialize episode manager to handle state transitions and data collection + episode_manager = EpisodeManager(config) + + # 
Initialize CI test manager + ci_test_manager = CITestManager(config) if config.ci_test else None + + try: + while ros_manager.ok(): + + need_reset = False + keyboard_input = keyboard_listener.pop_key() + + with telemetry.timer("total_loop"): + max_mujoco_state_len, mujoco_state_len, mujoco_state = env.get_mujoco_state_info() + + # Measure observation time + with telemetry.timer("observe"): + obs = env.observe() + wbc_policy.set_observation(obs) + + # Measure policy setup time + with telemetry.timer("policy_setup"): + teleop_cmd = teleop_policy.get_action() + + wbc_goal = {} + + # Note that wbc_goal["navigation_cmd'] could be overwritten by teleop_cmd + if teleop_cmd: + for key, value in teleop_cmd.items(): + wbc_goal[key] = value + # Draw IK indicators + if config.ik_indicator: + env.set_ik_indicator(teleop_cmd) + if wbc_goal: + wbc_policy.set_goal(wbc_goal) + + # Measure policy action calculation time + with telemetry.timer("policy_action"): + wbc_action = wbc_policy.get_action() + + if config.ci_test: + ci_test_manager.check_upper_body_motion(robot_model, wbc_action, config) + + # Measure action queue time + with telemetry.timer("step"): + obs, _, _, _, step_info = env.step(wbc_action) + env.render() + episode_manager.increment_step() + + if config.ci_test and config.ci_test_mode == "pre_merge": + ci_test_manager.check_end_effector_tracking( + teleop_cmd, obs, config, episode_manager.get_step_count() + ) + + # Handle data collection trigger + episode_manager.handle_collection_trigger(wbc_goal, keyboard_input, step_info) + + # Collect data frame + if episode_manager.should_collect_data(): + frame = generate_frame( + obs, + wbc_action, + seed, + mujoco_state, + mujoco_state_len, + max_mujoco_state_len, + teleop_cmd, + wbc_goal, + config.save_img_obs, + ) + # exporting data + exporter.add_frame(frame) + + # if done and task_completion_hold_count is 0, save the episode + need_reset = episode_manager.check_export_and_completion(exporter) + + # check data abort + if 
episode_manager.handle_skip(wbc_goal, keyboard_input, exporter): + need_reset = True + + if need_reset: + if config.ci_test: + print("CI test: Completed...") + raise KeyboardInterrupt + + seed = int(time.time()) + env.reset(seed) + env.render() + + print("Sleeping for 3 seconds before resetting teleop policy...") + for j in range(3, 0, -1): + print(f"Starting in {j}...") + time.sleep(1) + + wbc_policy, teleop_policy = get_policies(config, robot_type, robot_model) + episode_manager.reset_step_count() + + rate.sleep() + + except ros_manager.exceptions() as e: + print(f"ROSManager interrupted by user: {e}") + finally: + # Cleanup resources + teleop_policy.close() + dispatcher.stop() + ros_manager.shutdown() + env.close() + print("Sync sim data collection loop terminated.") + + return True + + +if __name__ == "__main__": + config = tyro.cli(SyncSimDataCollectionConfig) + main(config) diff --git a/decoupled_wbc/control/main/teleop/run_teleop_policy_loop.py b/decoupled_wbc/control/main/teleop/run_teleop_policy_loop.py new file mode 100644 index 0000000..ad18b9f --- /dev/null +++ b/decoupled_wbc/control/main/teleop/run_teleop_policy_loop.py @@ -0,0 +1,110 @@ +import time + +import rclpy +import tyro + +from decoupled_wbc.control.main.constants import CONTROL_GOAL_TOPIC +from decoupled_wbc.control.main.teleop.configs.configs import TeleopConfig +from decoupled_wbc.control.policy.lerobot_replay_policy import LerobotReplayPolicy +from decoupled_wbc.control.policy.teleop_policy import TeleopPolicy +from decoupled_wbc.control.robot_model.instantiation.g1 import instantiate_g1_robot_model +from decoupled_wbc.control.teleop.solver.hand.instantiation.g1_hand_ik_instantiation import ( + instantiate_g1_hand_ik_solver, +) +from decoupled_wbc.control.teleop.teleop_retargeting_ik import TeleopRetargetingIK +from decoupled_wbc.control.utils.ros_utils import ROSManager, ROSMsgPublisher +from decoupled_wbc.control.utils.telemetry import Telemetry + +TELEOP_NODE_NAME = "TeleopPolicy" + + 
+def main(config: TeleopConfig): + ros_manager = ROSManager(node_name=TELEOP_NODE_NAME) + node = ros_manager.node + + if config.robot == "g1": + waist_location = "lower_and_upper_body" if config.enable_waist else "lower_body" + robot_model = instantiate_g1_robot_model( + waist_location=waist_location, high_elbow_pose=config.high_elbow_pose + ) + left_hand_ik_solver, right_hand_ik_solver = instantiate_g1_hand_ik_solver() + else: + raise ValueError(f"Unsupported robot name: {config.robot}") + + if config.lerobot_replay_path: + teleop_policy = LerobotReplayPolicy( + robot_model=robot_model, parquet_path=config.lerobot_replay_path + ) + else: + print("running teleop policy, waiting teleop policy to be initialized...") + retargeting_ik = TeleopRetargetingIK( + robot_model=robot_model, + left_hand_ik_solver=left_hand_ik_solver, + right_hand_ik_solver=right_hand_ik_solver, + enable_visualization=config.enable_visualization, + body_active_joint_groups=["upper_body"], + ) + teleop_policy = TeleopPolicy( + robot_model=robot_model, + retargeting_ik=retargeting_ik, + body_control_device=config.body_control_device, + hand_control_device=config.hand_control_device, + body_streamer_ip=config.body_streamer_ip, # vive tracker, leap motion does not require + body_streamer_keyword=config.body_streamer_keyword, + enable_real_device=config.enable_real_device, + replay_data_path=config.teleop_replay_path, + ) + + # Create a publisher for the navigation commands + control_publisher = ROSMsgPublisher(CONTROL_GOAL_TOPIC) + + # Create rate controller + rate = node.create_rate(config.teleop_frequency) + iteration = 0 + time_to_get_to_initial_pose = 2 # seconds + + telemetry = Telemetry(window_size=100) + + try: + while rclpy.ok(): + with telemetry.timer("total_loop"): + t_start = time.monotonic() + # Get the current teleop action + with telemetry.timer("get_action"): + data = teleop_policy.get_action() + + # Add timing information to the message + t_now = time.monotonic() + data["timestamp"] 
= t_now + + # Set target completion time - longer for initial pose, then match control frequency + if iteration == 0: + data["target_time"] = t_now + time_to_get_to_initial_pose + else: + data["target_time"] = t_now + (1 / config.teleop_frequency) + + # Publish the teleop command + with telemetry.timer("publish_teleop_command"): + control_publisher.publish(data) + + # For the initial pose, wait the full duration before continuing + if iteration == 0: + print(f"Moving to initial pose for {time_to_get_to_initial_pose} seconds") + time.sleep(time_to_get_to_initial_pose) + iteration += 1 + end_time = time.monotonic() + if (end_time - t_start) > (1 / config.teleop_frequency): + telemetry.log_timing_info(context="Teleop Policy Loop Missed", threshold=0.001) + rate.sleep() + + except ros_manager.exceptions() as e: + print(f"ROSManager interrupted by user: {e}") + + finally: + print("Cleaning up...") + ros_manager.shutdown() + + +if __name__ == "__main__": + config = tyro.cli(TeleopConfig) + main(config) diff --git a/gr00t_wbc/control/policy/__init__.py b/decoupled_wbc/control/policy/__init__.py similarity index 100% rename from gr00t_wbc/control/policy/__init__.py rename to decoupled_wbc/control/policy/__init__.py diff --git a/decoupled_wbc/control/policy/g1_decoupled_whole_body_policy.py b/decoupled_wbc/control/policy/g1_decoupled_whole_body_policy.py new file mode 100644 index 0000000..d3f457f --- /dev/null +++ b/decoupled_wbc/control/policy/g1_decoupled_whole_body_policy.py @@ -0,0 +1,157 @@ +import time as time_module +from typing import Optional + +import numpy as np +from pinocchio import rpy + +from decoupled_wbc.control.base.policy import Policy +from decoupled_wbc.control.main.constants import DEFAULT_NAV_CMD + + +class G1DecoupledWholeBodyPolicy(Policy): + """ + This class implements a whole-body policy for the G1 robot by combining an upper-body + policy and a lower-body RL-based policy. 
+ It is designed to work with the G1 robot's specific configuration and control requirements. + """ + + def __init__( + self, + robot_model, + lower_body_policy: Policy, + upper_body_policy: Policy, + ): + self.robot_model = robot_model + self.lower_body_policy = lower_body_policy + self.upper_body_policy = upper_body_policy + self.last_goal_time = time_module.monotonic() + self.is_in_teleop_mode = False # Track if lower body is in teleop mode + + def set_observation(self, observation): + # Upper body policy is open loop (just interpolation), so we don't need to set the observation + self.lower_body_policy.set_observation(observation) + + def set_goal(self, goal): + """ + Set the goal for both upper and lower body policies. + + Args: + goal: Command from the planners + goal["target_upper_body_pose"]: Target pose for the upper body policy + goal["target_time"]: Target goal time + goal["interpolation_garbage_collection_time"]: Waypoints earlier than this time are removed + goal["navigate_cmd"]: Target navigation velocities for the lower body policy + goal["base_height_command"]: Target base height for both upper and lower body policies + """ + # Update goal timestamp for timeout safety + self.last_goal_time = time_module.monotonic() + + upper_body_goal = {} + lower_body_goal = {} + + # Upper body goal keys + upper_body_keys = [ + "target_upper_body_pose", + "base_height_command", + "target_time", + "interpolation_garbage_collection_time", + "navigate_cmd", + ] + for key in upper_body_keys: + if key in goal: + upper_body_goal[key] = goal[key] + + # Always ensure navigate_cmd is present to prevent interpolation from old dangerous values + if "navigate_cmd" not in goal: + # Safety: Inject safe default navigate_cmd to ensure interpolation goes to stop + if "target_time" in goal and isinstance(goal["target_time"], list): + upper_body_goal["navigate_cmd"] = [np.array(DEFAULT_NAV_CMD)] * len( + goal["target_time"] + ) + else: + upper_body_goal["navigate_cmd"] = 
np.array(DEFAULT_NAV_CMD) + + # Set teleop policy command flag + has_teleop_commands = ("navigate_cmd" in goal) or ("base_height_command" in goal) + self.is_in_teleop_mode = has_teleop_commands # Track teleop state for timeout safety + self.lower_body_policy.set_use_teleop_policy_cmd(has_teleop_commands) + + # Lower body goal keys + lower_body_keys = [ + "toggle_stand_command", + "toggle_policy_action", + ] + for key in lower_body_keys: + if key in goal: + lower_body_goal[key] = goal[key] + + self.upper_body_policy.set_goal(upper_body_goal) + self.lower_body_policy.set_goal(lower_body_goal) + + def get_action(self, time: Optional[float] = None): + current_time = time if time is not None else time_module.monotonic() + + # Safety timeout: Only apply when in teleop mode (communication loss dangerous) + # When in keyboard mode, no timeout needed (user controls directly) + if self.is_in_teleop_mode: + time_since_goal = current_time - self.last_goal_time + if time_since_goal > 1.0: # 1 second timeout + print( + f"SAFETY: Teleop mode timeout after {time_since_goal:.1f}s, injecting safe goal" + ) + # Inject safe goal to trigger all safety mechanisms (gear_wbc reset + interpolation reset) + safe_goal = { + "target_time": current_time + 0.1, + "interpolation_garbage_collection_time": current_time - 1.0, + } + self.set_goal( + safe_goal + ) # This will reset is_in_teleop_mode to False and trigger all safety + + # Get indices for groups + lower_body_indices = self.robot_model.get_joint_group_indices("lower_body") + upper_body_indices = self.robot_model.get_joint_group_indices("upper_body") + + # Initialize full configuration with zeros + q = np.zeros(self.robot_model.num_dofs) + + upper_body_action = self.upper_body_policy.get_action(time) + q[upper_body_indices] = upper_body_action["target_upper_body_pose"] + q_arms = q[self.robot_model.get_joint_group_indices("arms")] + base_height_command = upper_body_action.get("base_height_command", None) + interpolated_navigate_cmd = 
upper_body_action.get("navigate_cmd", None) + + # Compute torso orientation relative to waist, to pass to lower body policy + self.robot_model.cache_forward_kinematics(q, auto_clip=False) + torso_orientation = self.robot_model.frame_placement("torso_link").rotation + waist_orientation = self.robot_model.frame_placement("pelvis").rotation + # Extract yaw from rotation matrix and create a rotation with only yaw + # The rotation property is a 3x3 numpy array + waist_yaw = np.arctan2(waist_orientation[1, 0], waist_orientation[0, 0]) + # Create a rotation matrix with only yaw using Pinocchio's rpy functions + waist_yaw_only_rotation = rpy.rpyToMatrix(0, 0, waist_yaw) + yaw_only_waist_from_torso = waist_yaw_only_rotation.T @ torso_orientation + torso_orientation_rpy = rpy.matrixToRpy(yaw_only_waist_from_torso) + + lower_body_action = self.lower_body_policy.get_action( + time, q_arms, base_height_command, torso_orientation_rpy, interpolated_navigate_cmd + ) + + # If pelvis is both in upper and lower body, lower body policy takes preference + q[lower_body_indices] = lower_body_action["body_action"][0][ + : len(lower_body_indices) + ] # lower body (legs + waist) + + self.last_action = {"q": q} + + return {"q": q} + + def handle_keyboard_button(self, key): + try: + self.lower_body_policy.locomotion_policy.handle_keyboard_button(key) + except AttributeError: + # Only catch AttributeError, let other exceptions propagate + self.lower_body_policy.handle_keyboard_button(key) + + def activate_policy(self): + self.handle_keyboard_button("]") diff --git a/decoupled_wbc/control/policy/g1_gear_wbc_policy.py b/decoupled_wbc/control/policy/g1_gear_wbc_policy.py new file mode 100644 index 0000000..02021d6 --- /dev/null +++ b/decoupled_wbc/control/policy/g1_gear_wbc_policy.py @@ -0,0 +1,295 @@ +import collections +from pathlib import Path +from typing import Any, Dict, Optional + +import numpy as np +import onnxruntime as ort +import torch + +from decoupled_wbc.control.base.policy import 
Policy +from decoupled_wbc.control.utils.gear_wbc_utils import get_gravity_orientation, load_config + + +class G1GearWbcPolicy(Policy): + """Simple G1 robot policy using OpenGearWbc trained neural network.""" + + def __init__(self, robot_model, config: str, model_path: str): + """Initialize G1GearWbcPolicy. + + Args: + config_path: Path to gear_wbc YAML configuration file + """ + self.config, self.LEGGED_GYM_ROOT_DIR = load_config(config) + self.robot_model = robot_model + self.use_teleop_policy_cmd = False + + package_root = Path(__file__).resolve().parents[2] + self.sim2mujoco_root_dir = str(package_root / "sim2mujoco") + model_path_1, model_path_2 = model_path.split(",") + + self.policy_1 = self.load_onnx_policy( + self.sim2mujoco_root_dir + "/resources/robots/g1/" + model_path_1 + ) + self.policy_2 = self.load_onnx_policy( + self.sim2mujoco_root_dir + "/resources/robots/g1/" + model_path_2 + ) + + # Initialize observation history buffer + self.observation = None + self.obs_history = collections.deque(maxlen=self.config["obs_history_len"]) + self.obs_buffer = np.zeros(self.config["num_obs"], dtype=np.float32) + self.counter = 0 + + # Initialize state variables + self.use_policy_action = False + self.action = np.zeros(self.config["num_actions"], dtype=np.float32) + self.target_dof_pos = self.config["default_angles"].copy() + self.cmd = self.config["cmd_init"].copy() + self.height_cmd = self.config["height_cmd"] + self.freq_cmd = self.config["freq_cmd"] + self.roll_cmd = self.config["rpy_cmd"][0] + self.pitch_cmd = self.config["rpy_cmd"][1] + self.yaw_cmd = self.config["rpy_cmd"][2] + self.gait_indices = torch.zeros((1), dtype=torch.float32) + + def load_onnx_policy(self, model_path: str): + print(f"Loading ONNX policy from {model_path}") + model = ort.InferenceSession(model_path) + + def run_inference(input_tensor): + ort_inputs = {model.get_inputs()[0].name: input_tensor.cpu().numpy()} + ort_outs = model.run(None, ort_inputs) + return torch.tensor(ort_outs[0], 
device="cpu") + + print(f"Successfully loaded ONNX policy from {model_path}") + + return run_inference + + def compute_observation(self, observation: Dict[str, Any]) -> tuple[np.ndarray, int]: + """Compute the observation vector from current state""" + # Get body joint indices (excluding waist roll and pitch) + self.gait_indices = torch.remainder(self.gait_indices + 0.02 * self.freq_cmd, 1.0) + durations = torch.full_like(self.gait_indices, 0.5) + phases = 0.5 + foot_indices = [ + self.gait_indices + phases, # FL + self.gait_indices, # FR + ] + self.foot_indices = torch.remainder( + torch.cat([foot_indices[i].unsqueeze(1) for i in range(2)], dim=1), 1.0 + ) + for fi in foot_indices: + stance = fi < durations + swing = fi >= durations + fi[stance] = fi[stance] * (0.5 / durations[stance]) + fi[swing] = 0.5 + (fi[swing] - durations[swing]) * (0.5 / (1 - durations[swing])) + + self.clock_inputs = torch.stack([torch.sin(2 * np.pi * fi) for fi in foot_indices], dim=1) + + body_indices = self.robot_model.get_joint_group_indices("body") + body_indices = [idx for idx in body_indices] + + n_joints = len(body_indices) + + # Extract joint data + qj = observation["q"][body_indices].copy() + dqj = observation["dq"][body_indices].copy() + + # Extract floating base data + quat = observation["floating_base_pose"][3:7].copy() # quaternion + omega = observation["floating_base_vel"][3:6].copy() # angular velocity + + # Handle default angles padding + if len(self.config["default_angles"]) < n_joints: + padded_defaults = np.zeros(n_joints, dtype=np.float32) + padded_defaults[: len(self.config["default_angles"])] = self.config["default_angles"] + else: + padded_defaults = self.config["default_angles"][:n_joints] + + # Scale the values + qj_scaled = (qj - padded_defaults) * self.config["dof_pos_scale"] + dqj_scaled = dqj * self.config["dof_vel_scale"] + gravity_orientation = get_gravity_orientation(quat) + omega_scaled = omega * self.config["ang_vel_scale"] + + # Calculate single 
observation dimension + single_obs_dim = 86 # 3 + 1 + 3 + 3 + 3 + n_joints + n_joints + 15, n_joints = 29 + + # Create single observation + single_obs = np.zeros(single_obs_dim, dtype=np.float32) + single_obs[0:3] = self.cmd[:3] * self.config["cmd_scale"] + single_obs[3:4] = np.array([self.height_cmd]) + single_obs[4:7] = np.array([self.roll_cmd, self.pitch_cmd, self.yaw_cmd]) + single_obs[7:10] = omega_scaled + single_obs[10:13] = gravity_orientation + # single_obs[14:17] = omega_scaled_torso + # single_obs[17:20] = gravity_torso + single_obs[13 : 13 + n_joints] = qj_scaled + single_obs[13 + n_joints : 13 + 2 * n_joints] = dqj_scaled + single_obs[13 + 2 * n_joints : 13 + 2 * n_joints + 15] = self.action + # single_obs[13 + 2 * n_joints + 15 : 13 + 2 * n_joints + 15 + 2] = ( + # processed_clock_inputs.detach().cpu().numpy() + # ) + return single_obs, single_obs_dim + + def set_observation(self, observation: Dict[str, Any]): + """Update the policy's current observation of the environment. 
+ + Args: + observation: Dictionary containing single observation from current state + Should include 'obs' key with current single observation + """ + + # Extract the single observation + self.observation = observation + single_obs, single_obs_dim = self.compute_observation(observation) + + # Update observation history every control_decimation steps + # if self.counter % self.config['control_decimation'] == 0: + # Add current observation to history + self.obs_history.append(single_obs) + + # Fill history with zeros if not enough observations yet + while len(self.obs_history) < self.config["obs_history_len"]: + self.obs_history.appendleft(np.zeros_like(single_obs)) + + # Construct full observation with history + single_obs_dim = len(single_obs) + for i, hist_obs in enumerate(self.obs_history): + start_idx = i * single_obs_dim + end_idx = start_idx + single_obs_dim + self.obs_buffer[start_idx:end_idx] = hist_obs + + # Convert to tensor for policy + self.obs_tensor = torch.from_numpy(self.obs_buffer).unsqueeze(0) + # self.counter += 1 + + assert self.obs_tensor.shape[1] == self.config["num_obs"] + + def set_use_teleop_policy_cmd(self, use_teleop_policy_cmd: bool): + self.use_teleop_policy_cmd = use_teleop_policy_cmd + # Safety: When teleop is disabled, reset navigation to stop + if not use_teleop_policy_cmd: + self.nav_cmd = self.config["cmd_init"].copy() # Reset to safe default + + def set_goal(self, goal: Dict[str, Any]): + """Set the goal for the policy. 
+ + Args: + goal: Dictionary containing the goal for the policy + """ + + if "toggle_policy_action" in goal: + if goal["toggle_policy_action"]: + self.use_policy_action = not self.use_policy_action + + def get_action( + self, + time: Optional[float] = None, + arms_target_pose: Optional[np.ndarray] = None, + base_height_command: Optional[np.ndarray] = None, + torso_orientation_rpy: Optional[np.ndarray] = None, + interpolated_navigate_cmd: Optional[np.ndarray] = None, + ) -> Dict[str, Any]: + """Compute and return the next action based on current observation. + + Args: + time: Optional "monotonic time" for time-dependent policies (unused) + + Returns: + Dictionary containing the action to be executed + """ + if self.obs_tensor is None: + raise ValueError("No observation set. Call set_observation() first.") + + if base_height_command is not None and self.use_teleop_policy_cmd: + self.height_cmd = ( + base_height_command[0] + if isinstance(base_height_command, list) + else base_height_command + ) + + if interpolated_navigate_cmd is not None and self.use_teleop_policy_cmd: + self.cmd = interpolated_navigate_cmd + + if torso_orientation_rpy is not None and self.use_teleop_policy_cmd: + self.roll_cmd = torso_orientation_rpy[0] + self.pitch_cmd = torso_orientation_rpy[1] + self.yaw_cmd = torso_orientation_rpy[2] + + # Run policy inference + with torch.no_grad(): + # Select appropriate policy based on command magnitude + if np.linalg.norm(self.cmd) < 0.05: + # Use standing policy for small commands + policy = self.policy_1 + else: + # Use walking policy for movement commands + policy = self.policy_2 + + self.action = policy(self.obs_tensor).detach().numpy().squeeze() + + # Transform action to target_dof_pos + if self.use_policy_action: + cmd_q = self.action * self.config["action_scale"] + self.config["default_angles"] + else: + cmd_q = self.observation["q"][self.robot_model.get_joint_group_indices("lower_body")] + + cmd_dq = np.zeros(self.config["num_actions"]) + cmd_tau = 
np.zeros(self.config["num_actions"]) + + return {"body_action": (cmd_q, cmd_dq, cmd_tau)} + + def handle_keyboard_button(self, key): + if key == "]": + self.use_policy_action = True + elif key == "o": + self.use_policy_action = False + elif key == "w": + self.cmd[0] += 0.2 + elif key == "s": + self.cmd[0] -= 0.2 + elif key == "a": + self.cmd[1] += 0.2 + elif key == "d": + self.cmd[1] -= 0.2 + elif key == "q": + self.cmd[2] += 0.2 + elif key == "e": + self.cmd[2] -= 0.2 + elif key == "z": + self.cmd[0] = 0.0 + self.cmd[1] = 0.0 + self.cmd[2] = 0.0 + elif key == "1": + self.height_cmd += 0.1 + elif key == "2": + self.height_cmd -= 0.1 + elif key == "n": + self.freq_cmd -= 0.1 + self.freq_cmd = max(1.0, self.freq_cmd) + elif key == "m": + self.freq_cmd += 0.1 + self.freq_cmd = min(2.0, self.freq_cmd) + elif key == "3": + self.roll_cmd -= np.deg2rad(10) + elif key == "4": + self.roll_cmd += np.deg2rad(10) + elif key == "5": + self.pitch_cmd -= np.deg2rad(10) + elif key == "6": + self.pitch_cmd += np.deg2rad(10) + elif key == "7": + self.yaw_cmd -= np.deg2rad(10) + elif key == "8": + self.yaw_cmd += np.deg2rad(10) + + if key: + print("--------------------------------") + print(f"Linear velocity command: {self.cmd}") + print(f"Base height command: {self.height_cmd}") + print(f"Use policy action: {self.use_policy_action}") + print(f"roll deg angle: {np.rad2deg(self.roll_cmd)}") + print(f"pitch deg angle: {np.rad2deg(self.pitch_cmd)}") + print(f"yaw deg angle: {np.rad2deg(self.yaw_cmd)}") + print(f"Gait frequency: {self.freq_cmd}") diff --git a/decoupled_wbc/control/policy/identity_policy.py b/decoupled_wbc/control/policy/identity_policy.py new file mode 100644 index 0000000..53c955d --- /dev/null +++ b/decoupled_wbc/control/policy/identity_policy.py @@ -0,0 +1,25 @@ +from copy import deepcopy +from typing import Optional + +import gymnasium as gym + +from decoupled_wbc.control.base.policy import Policy + + +class IdentityPolicy(Policy): + def __init__(self): + 
self.reset() + + def get_action(self, time: Optional[float] = None) -> dict[str, any]: + return self.goal + + def set_goal(self, goal: dict[str, any]) -> None: + self.goal = deepcopy(goal) + self.goal.pop("interpolation_garbage_collection_time", None) + self.goal.pop("target_time", None) + + def observation_space(self) -> gym.spaces.Dict: + return gym.spaces.Dict() + + def action_space(self) -> gym.spaces.Dict: + return gym.spaces.Dict() diff --git a/decoupled_wbc/control/policy/interpolation_policy.py b/decoupled_wbc/control/policy/interpolation_policy.py new file mode 100644 index 0000000..03b88e7 --- /dev/null +++ b/decoupled_wbc/control/policy/interpolation_policy.py @@ -0,0 +1,297 @@ +import numbers +import time as time_module +from typing import Any, Dict, Optional, Union + +import gymnasium as gym +import numpy as np +import scipy.interpolate as si + +from decoupled_wbc.control.base.policy import Policy + + +class InterpolationPolicy(Policy): + def __init__( + self, + init_time: float, + init_values: dict[str, np.ndarray], + max_change_rate: float, + ): + """ + Args: + init_time: The time of recording the initial values. + init_values: The initial values of the features. + The keys are the names of the features, and the values + are the initial values of the features (1D array). + max_change_rate: The maximum change rate. + """ + super().__init__() + self.last_action = init_values # Vecs are 1D arrays + self.concat_order = sorted(init_values.keys()) + self.concat_dims = [] + for key in self.concat_order: + vec = np.array(init_values[key]) + if vec.ndim == 2 and vec.shape[0] == 1: + vec = vec[0] + init_values[key] = vec + assert vec.ndim == 1, f"The shape of {key} should be (D,). Got {vec.shape}." 
+ self.concat_dims.append(vec.shape[0]) + + self.init_values_concat = self._concat_vecs(init_values, 1) + self.max_change_rate = max_change_rate + self.reset(init_time) + + def reset(self, init_time: float = time_module.monotonic()): + self.interp = PoseTrajectoryInterpolator(np.array([init_time]), self.init_values_concat) + self.last_waypoint_time = init_time + self.max_change_rate = self.max_change_rate + + def _concat_vecs(self, values: dict[str, np.ndarray], length: int) -> np.ndarray: + """ + Concatenate the vectors into a 2D array to be used for interpolation. + Args: + values: The values to concatenate. + length: The length of the concatenated vectors (time dimension). + Returns: + The concatenated vectors (T, D) arrays. + """ + concat_vecs = [] + for key in self.concat_order: + if key in values: + vec = np.array(values[key]) + if vec.ndim == 1: + # If the vector is 1D, tile it to the length of the time dimension + vec = np.tile(vec, (length, 1)) + assert vec.ndim == 2, f"The shape of {key} should be (T, D). Got {vec.shape}." + concat_vecs.append(vec) + else: + # If the vector is not in the values, use the last action + # Since the last action is 1D, we need to tile it to the length of the time dimension + concat_vecs.append(np.tile(self.last_action[key], (length, 1))) + return np.concatenate(concat_vecs, axis=1) # Vecs are 2D (T, D) arrays + + def _unconcat_vecs(self, concat_vec: np.ndarray) -> dict[str, np.ndarray]: + curr_idx = 0 + action = {} + assert ( + concat_vec.ndim == 1 + ), f"The shape of the concatenated vectors should be (T, D). Got {concat_vec.shape}." + for key, dim in zip(self.concat_order, self.concat_dims): + action[key] = concat_vec[curr_idx : curr_idx + dim] + curr_idx += dim + return action # Vecs are 1D arrays + + def __call__( + self, observation: Dict[str, Any], goal: Dict[str, Any], time: float + ) -> Dict[str, np.ndarray]: + raise NotImplementedError( + "`InterpolationPolicy` accepts goal and provide action in two separate methods." 
+ ) + + def set_goal(self, goal: Dict[str, Any]) -> None: + if "target_time" not in goal: + return + assert ( + "interpolation_garbage_collection_time" in goal + ), "`interpolation_garbage_collection_time` is required." + target_time = goal.pop("target_time") + interpolation_garbage_collection_time = goal.pop("interpolation_garbage_collection_time") + + if isinstance(target_time, list): + for key, vec in goal.items(): + assert isinstance(vec, list) + assert len(vec) == len(target_time), ( + f"The length of {key} and `target_time` should be the same. " + f"Got {len(vec)} and {len(target_time)}." + ) + else: + target_time = [target_time] + for key in goal: + goal[key] = [goal[key]] + + # Concatenate all vectors in goal + concat_vecs = self._concat_vecs(goal, len(target_time)) + assert concat_vecs.shape[0] == len(target_time), ( + f"The length of the concatenated goal and `target_time` should be the same. " + f"Got {concat_vecs.shape[0]} and {len(target_time)}." + ) + + for tt, vec in zip(target_time, concat_vecs): + if tt < interpolation_garbage_collection_time: + continue + self.interp = self.interp.schedule_waypoint( + pose=vec, + time=tt, + max_change_rate=self.max_change_rate, + interpolation_garbage_collection_time=interpolation_garbage_collection_time, + last_waypoint_time=self.last_waypoint_time, + ) + self.last_waypoint_time = tt + + def get_action(self, time: Optional[float] = None) -> dict[str, Any]: + """Get the next action based on the (current) monotonic time.""" + if time is None: + time = time_module.monotonic() + concat_vec = self.interp(time) + self.last_action.update(self._unconcat_vecs(concat_vec)) + return self.last_action + + def observation_space(self) -> gym.spaces.Dict: + """Return the observation space.""" + pass + + def action_space(self) -> gym.spaces.Dict: + """Return the action space.""" + pass + + def close(self) -> None: + """Clean up resources.""" + pass + + +class PoseTrajectoryInterpolator: + def __init__(self, times: np.ndarray, 
poses: np.ndarray): + assert len(times) >= 1 + assert len(poses) == len(times) + + times = np.asarray(times) + poses = np.asarray(poses) + + self.num_joint = len(poses[0]) + + if len(times) == 1: + # special treatment for single step interpolation + self.single_step = True + self._times = times + self._poses = poses + else: + self.single_step = False + assert np.all(times[1:] >= times[:-1]) + self.pose_interp = si.interp1d(times, poses, axis=0, assume_sorted=True) + + @property + def times(self) -> np.ndarray: + if self.single_step: + return self._times + else: + return self.pose_interp.x + + @property + def poses(self) -> np.ndarray: + if self.single_step: + return self._poses + else: + return self.pose_interp.y + + def trim(self, start_t: float, end_t: float) -> "PoseTrajectoryInterpolator": + assert start_t <= end_t + times = self.times + should_keep = (start_t < times) & (times < end_t) + keep_times = times[should_keep] + all_times = np.concatenate([[start_t], keep_times, [end_t]]) + # remove duplicates, Slerp requires strictly increasing x + all_times = np.unique(all_times) + # interpolate + all_poses = self(all_times) + return PoseTrajectoryInterpolator(times=all_times, poses=all_poses) + + def schedule_waypoint( + self, + pose, + time, + max_change_rate=np.inf, + interpolation_garbage_collection_time=None, + last_waypoint_time=None, + ) -> "PoseTrajectoryInterpolator": + if not isinstance(max_change_rate, np.ndarray): + max_change_rate = np.array([max_change_rate] * self.num_joint) + + assert len(max_change_rate) == self.num_joint + assert np.max(max_change_rate) > 0 + + if last_waypoint_time is not None: + assert interpolation_garbage_collection_time is not None + + # trim current interpolator to between interpolation_garbage_collection_time and last_waypoint_time + start_time = self.times[0] + end_time = self.times[-1] + assert start_time <= end_time + if interpolation_garbage_collection_time is not None: + if time <= interpolation_garbage_collection_time: 
+ # if insert time is earlier than current time + # no effect should be done to the interpolator + return self + # now, interpolation_garbage_collection_time < time + start_time = max(interpolation_garbage_collection_time, start_time) + + if last_waypoint_time is not None: + # if last_waypoint_time is earlier than start_time + # use start_time + if time <= last_waypoint_time: + end_time = interpolation_garbage_collection_time + else: + end_time = max(last_waypoint_time, interpolation_garbage_collection_time) + else: + end_time = interpolation_garbage_collection_time + + end_time = min(end_time, time) + start_time = min(start_time, end_time) + # end time should be the latest of all times except time + # after this we can assume order (proven by zhenjia, due to the 2 min operations) + + # Constraints: + # start_time <= end_time <= time (proven by zhenjia) + # interpolation_garbage_collection_time <= start_time (proven by zhenjia) + # interpolation_garbage_collection_time <= time (proven by zhenjia) + + # time can't change + # last_waypoint_time can't change + # interpolation_garbage_collection_time can't change + assert start_time <= end_time + assert end_time <= time + if last_waypoint_time is not None: + if time <= last_waypoint_time: + assert end_time == interpolation_garbage_collection_time + else: + assert end_time == max(last_waypoint_time, interpolation_garbage_collection_time) + + if interpolation_garbage_collection_time is not None: + assert interpolation_garbage_collection_time <= start_time + assert interpolation_garbage_collection_time <= time + trimmed_interp = self.trim(start_time, end_time) + # after this, all waypoints in trimmed_interp is within start_time and end_time + # and is earlier than time + + # determine speed + duration = time - end_time + end_pose = trimmed_interp(end_time) + pose_min_duration = np.max(np.abs(end_pose - pose) / max_change_rate) + duration = max(duration, pose_min_duration) + assert duration >= 0 + last_waypoint_time = 
from typing import Any, Dict, Optional

import numpy as np

from decoupled_wbc.control.base.policy import Policy


class KeyboardNavigationPolicy(Policy):
    """Maps WASD/QE key presses to accumulated base velocity commands."""

    def __init__(
        self,
        max_linear_velocity: float = 0.5,
        max_angular_velocity: float = 0.5,
        verbose: bool = True,
        **kwargs,
    ):
        """
        Initialize the navigation policy.

        Args:
            max_linear_velocity: Maximum linear velocity in m/s (for x and y components)
            max_angular_velocity: Maximum angular velocity in rad/s (for yaw component)
            verbose: Whether to print the commanded velocities after each key press
            **kwargs: Additional arguments passed to the base Policy class
        """
        super().__init__(**kwargs)
        self.max_linear_velocity = max_linear_velocity
        self.max_angular_velocity = max_angular_velocity
        self.verbose = verbose

        # Commanded velocities, accumulated from key presses.
        self.lin_vel_command = np.zeros(2, dtype=np.float32)  # [vx, vy]
        self.ang_vel_command = np.zeros(1, dtype=np.float32)  # [wz]

    def get_action(self, time: Optional[float] = None) -> Dict[str, Any]:
        """
        Build the navigation action from the current velocity commands.

        Args:
            time: Current time (optional, unused)

        Returns:
            Dict containing:
                - navigate_cmd: np.array([vx, vy, wz]) where vx/vy are linear
                  velocities (m/s) and wz is angular velocity (rad/s), each
                  clipped to the configured limits.
        """
        lin_lim = self.max_linear_velocity
        ang_lim = self.max_angular_velocity

        components = [
            np.clip(self.lin_vel_command[0], -lin_lim, lin_lim),
            np.clip(self.lin_vel_command[1], -lin_lim, lin_lim),
            np.clip(self.ang_vel_command[0], -ang_lim, ang_lim),
        ]
        return {"navigate_cmd": np.array(components, dtype=np.float32)}

    def handle_keyboard_button(self, keycode: str):
        """
        Adjust the velocity commands in response to a key press.

        Args:
            keycode: The key that was pressed
        """
        step = 0.1
        if keycode == "w":
            self.lin_vel_command[0] += step  # more forward velocity
        elif keycode == "s":
            self.lin_vel_command[0] -= step  # more backward velocity
        elif keycode == "a":
            self.lin_vel_command[1] += step  # more leftward velocity
        elif keycode == "d":
            self.lin_vel_command[1] -= step  # more rightward velocity
        elif keycode == "q":
            self.ang_vel_command[0] += step  # more counter-clockwise rotation
        elif keycode == "e":
            self.ang_vel_command[0] -= step  # more clockwise rotation
        elif keycode == "z":
            # Zero out every commanded velocity.
            self.lin_vel_command[:] = 0.0
            self.ang_vel_command[:] = 0.0
            if self.verbose:
                print("Navigation policy: Reset all velocity commands to zero")

        # Echo the resulting commands after any key press.
        if self.verbose:
            print(f"Nav lin vel: ({self.lin_vel_command[0]:.2f}, {self.lin_vel_command[1]:.2f})")
            print(f"Nav ang vel: {self.ang_vel_command[0]:.2f}")
+ """ + + is_active = True # by default, the replay policy is active + + def __init__(self, robot_model: RobotModel, parquet_path: str, use_viz: bool = False): + # self.dataset = LerobotDataset(dataset_path) + self.parquet_path = parquet_path + self._ctr = 0 + # read the parquet file + self.df = pd.read_parquet(self.parquet_path) + self._max_ctr = len(self.df) + # get the action from the dataframe + self.action = self.df.iloc[self._ctr]["action"] + self.use_viz = use_viz + if self.use_viz: + self.viz = RerunViz( + image_keys=["egoview_image"], + tensor_keys=[ + "left_arm_qpos", + "left_hand_qpos", + "right_arm_qpos", + "right_hand_qpos", + ], + window_size=5.0, + ) + self.robot_model = robot_model + self.upper_body_joint_indices = self.robot_model.get_joint_group_indices("upper_body") + + def get_action(self) -> dict[str, any]: + # get the action from the dataframe + action = self.df.iloc[self._ctr]["action"] + wrist_pose = self.df.iloc[self._ctr]["action.eef"] + navigate_cmd = self.df.iloc[self._ctr].get("teleop.navigate_command", DEFAULT_NAV_CMD) + base_height_cmd = self.df.iloc[self._ctr].get( + "teleop.base_height_command", DEFAULT_BASE_HEIGHT + ) + + self._ctr += 1 + if self._ctr >= self._max_ctr: + self._ctr = 0 + # print(f"Replay {self._ctr} / {self._max_ctr}") + if self.use_viz: + self.viz.plot_tensors( + { + "left_arm_qpos": action[self.robot_model.get_joint_group_indices("left_arm")] + + 15, + "left_hand_qpos": action[self.robot_model.get_joint_group_indices("left_hand")] + + 15, + "right_arm_qpos": action[self.robot_model.get_joint_group_indices("right_arm")] + + 15, + "right_hand_qpos": action[ + self.robot_model.get_joint_group_indices("right_hand") + ] + + 15, + }, + time.monotonic(), + ) + + return { + "target_upper_body_pose": action[self.upper_body_joint_indices], + "wrist_pose": wrist_pose, + "navigate_cmd": navigate_cmd, + "base_height_cmd": base_height_cmd, + "timestamp": time.time(), + } + + def action_to_cmd(self, action: dict[str, any]) -> 
dict[str, any]: + action["target_upper_body_pose"] = action["q"][ + self.robot_model.get_joint_group_indices("upper_body") + ] + del action["q"] + return action + + def set_observation(self, observation: dict[str, any]): + pass + + def get_observation(self) -> dict[str, any]: + return { + "wrist_pose": self.df.iloc[self._ctr - 1].get( + "observation.eef_state", DEFAULT_WRIST_POSE + ), + "timestamp": time.time(), + } + + +if __name__ == "__main__": + policy = LerobotReplayPolicy( + parquet_path="outputs/g1-open-hands-may7/data/chunk-000/episode_000000.parquet" + ) + action = policy.get_action() + print(action) diff --git a/decoupled_wbc/control/policy/teleop_policy.py b/decoupled_wbc/control/policy/teleop_policy.py new file mode 100644 index 0000000..e05a43b --- /dev/null +++ b/decoupled_wbc/control/policy/teleop_policy.py @@ -0,0 +1,207 @@ +from contextlib import contextmanager +import time +from typing import Optional + +import numpy as np +from scipy.spatial.transform import Rotation as R + +from decoupled_wbc.control.base.policy import Policy +from decoupled_wbc.control.robot_model import RobotModel +from decoupled_wbc.control.teleop.teleop_retargeting_ik import TeleopRetargetingIK +from decoupled_wbc.control.teleop.teleop_streamer import TeleopStreamer + + +class TeleopPolicy(Policy): + """ + Robot-agnostic teleop policy. + Clean separation: IK processing vs command passing. + All robot-specific properties are abstracted through robot_model and hand_ik_solvers. 
+ """ + + def __init__( + self, + body_control_device: str, + hand_control_device: str, + robot_model: RobotModel, + retargeting_ik: TeleopRetargetingIK, + body_streamer_ip: str = "192.168.?.?", + body_streamer_keyword: str = "shoulder", + enable_real_device: bool = True, + replay_data_path: Optional[str] = None, + replay_speed: float = 1.0, + wait_for_activation: int = 5, + activate_keyboard_listener: bool = True, + ): + if activate_keyboard_listener: + from decoupled_wbc.control.utils.keyboard_dispatcher import KeyboardListenerSubscriber + + self.keyboard_listener = KeyboardListenerSubscriber() + else: + self.keyboard_listener = None + + self.wait_for_activation = wait_for_activation + + self.teleop_streamer = TeleopStreamer( + robot_model=robot_model, + body_control_device=body_control_device, + hand_control_device=hand_control_device, + enable_real_device=enable_real_device, + body_streamer_ip=body_streamer_ip, + body_streamer_keyword=body_streamer_keyword, + replay_data_path=replay_data_path, + replay_speed=replay_speed, + ) + self.robot_model = robot_model + self.retargeting_ik = retargeting_ik + self.is_active = False + + self.latest_left_wrist_data = np.eye(4) + self.latest_right_wrist_data = np.eye(4) + self.latest_left_fingers_data = {"position": np.zeros((25, 4, 4))} + self.latest_right_fingers_data = {"position": np.zeros((25, 4, 4))} + + def set_goal(self, goal: dict[str, any]): + # The current teleop policy doesn't take higher level commands yet. 
+ pass + + def get_action(self) -> dict[str, any]: + # Get structured data + streamer_output = self.teleop_streamer.get_streamer_data() + + # Handle activation using teleop_data commands + self.check_activation( + streamer_output.teleop_data, wait_for_activation=self.wait_for_activation + ) + + action = {} + + # Process streamer data if active + if self.is_active and streamer_output.ik_data: + body_data = streamer_output.ik_data["body_data"] + left_hand_data = streamer_output.ik_data["left_hand_data"] + right_hand_data = streamer_output.ik_data["right_hand_data"] + + left_wrist_name = self.robot_model.supplemental_info.hand_frame_names["left"] + right_wrist_name = self.robot_model.supplemental_info.hand_frame_names["right"] + self.latest_left_wrist_data = body_data[left_wrist_name] + self.latest_right_wrist_data = body_data[right_wrist_name] + self.latest_left_fingers_data = left_hand_data + self.latest_right_fingers_data = right_hand_data + + # TODO: This stores the same data again + ik_data = { + "body_data": body_data, + "left_hand_data": left_hand_data, + "right_hand_data": right_hand_data, + } + action["ik_data"] = ik_data + + # Wrist poses (pos and quat) + # TODO: This stores the same wrist poses in two different formats + left_wrist_matrix = self.latest_left_wrist_data + right_wrist_matrix = self.latest_right_wrist_data + left_wrist_pose = np.concatenate( + [ + left_wrist_matrix[:3, 3], + R.from_matrix(left_wrist_matrix[:3, :3]).as_quat(scalar_first=True), + ] + ) + right_wrist_pose = np.concatenate( + [ + right_wrist_matrix[:3, 3], + R.from_matrix(right_wrist_matrix[:3, :3]).as_quat(scalar_first=True), + ] + ) + + # Combine IK results with control commands (no teleop_data commands) + action.update( + { + "left_wrist": self.latest_left_wrist_data, + "right_wrist": self.latest_right_wrist_data, + "left_fingers": self.latest_left_fingers_data, + "right_fingers": self.latest_right_fingers_data, + "wrist_pose": np.concatenate([left_wrist_pose, 
right_wrist_pose]), + **streamer_output.control_data, # Only control & data collection commands pass through + **streamer_output.data_collection_data, + } + ) + + # Run retargeting IK + if "ik_data" in action: + self.retargeting_ik.set_goal(action["ik_data"]) + action["target_upper_body_pose"] = self.retargeting_ik.get_action() + + return action + + def close(self) -> bool: + self.teleop_streamer.stop_streaming() + return True + + def check_activation(self, teleop_data: dict, wait_for_activation: int = 5): + """Activation logic only looks at teleop data commands""" + key = self.keyboard_listener.read_msg() if self.keyboard_listener else "" + toggle_activation_by_keyboard = key == "l" + reset_teleop_policy_by_keyboard = key == "k" + toggle_activation_by_teleop = teleop_data.get("toggle_activation", False) + + if reset_teleop_policy_by_keyboard: + print("Resetting teleop policy") + self.reset() + + if toggle_activation_by_keyboard or toggle_activation_by_teleop: + self.is_active = not self.is_active + if self.is_active: + print("Starting teleop policy") + + if wait_for_activation > 0 and toggle_activation_by_keyboard: + print(f"Sleeping for {wait_for_activation} seconds before starting teleop...") + for i in range(wait_for_activation, 0, -1): + print(f"Starting in {i}...") + time.sleep(1) + + # dda: calibration logic should use current IK data + self.teleop_streamer.calibrate() + print("Teleop policy calibrated") + else: + print("Stopping teleop policy") + + @contextmanager + def activate(self): + try: + yield self + finally: + self.close() + + def handle_keyboard_button(self, keycode): + """ + Handle keyboard input with proper state toggle. 
+ """ + if keycode == "l": + # Toggle start state + self.is_active = not self.is_active + # Reset initialization when stopping + if not self.is_active: + self._initialized = False + if keycode == "k": + print("Resetting teleop policy") + self.reset() + + def activate_policy(self, wait_for_activation: int = 5): + """activate the teleop policy""" + self.is_active = False + self.check_activation( + teleop_data={"toggle_activation": True}, wait_for_activation=wait_for_activation + ) + + def reset(self, wait_for_activation: int = 5, auto_activate: bool = False): + """Reset the teleop policy to the initial state, and re-activate it.""" + self.teleop_streamer.reset() + self.retargeting_ik.reset() + self.is_active = False + self.latest_left_wrist_data = np.eye(4) + self.latest_right_wrist_data = np.eye(4) + self.latest_left_fingers_data = {"position": np.zeros((25, 4, 4))} + self.latest_right_fingers_data = {"position": np.zeros((25, 4, 4))} + + if auto_activate: + self.activate_policy(wait_for_activation) diff --git a/decoupled_wbc/control/policy/wbc_policy_factory.py b/decoupled_wbc/control/policy/wbc_policy_factory.py new file mode 100644 index 0000000..bccbd48 --- /dev/null +++ b/decoupled_wbc/control/policy/wbc_policy_factory.py @@ -0,0 +1,65 @@ +import os +from pathlib import Path +import time + +import numpy as np + +import decoupled_wbc +from decoupled_wbc.control.main.constants import DEFAULT_BASE_HEIGHT, DEFAULT_NAV_CMD +from decoupled_wbc.control.policy.g1_gear_wbc_policy import G1GearWbcPolicy +from decoupled_wbc.control.policy.identity_policy import IdentityPolicy +from decoupled_wbc.control.policy.interpolation_policy import InterpolationPolicy + +from .g1_decoupled_whole_body_policy import G1DecoupledWholeBodyPolicy + +WBC_VERSIONS = ["gear_wbc"] + + +def get_wbc_policy( + robot_type, + robot_model, + wbc_config, + init_time=time.monotonic(), +): + current_upper_body_pose = robot_model.get_initial_upper_body_pose() + + if robot_type == "g1": + 
upper_body_policy_type = wbc_config.get("upper_body_policy_type", "interpolation") + if upper_body_policy_type == "identity": + upper_body_policy = IdentityPolicy() + else: + upper_body_policy = InterpolationPolicy( + init_time=init_time, + init_values={ + "target_upper_body_pose": current_upper_body_pose, + "base_height_command": np.array([DEFAULT_BASE_HEIGHT]), + "navigate_cmd": np.array([DEFAULT_NAV_CMD]), + }, + max_change_rate=wbc_config["upper_body_max_joint_speed"], + ) + + lower_body_policy_type = wbc_config.get("VERSION", "gear_wbc") + if lower_body_policy_type not in ["gear_wbc"]: + raise ValueError( + f"Invalid lower body policy version: {lower_body_policy_type}. " + f"Only 'gear_wbc' is supported." + ) + + # Get the base path to decoupled_wbc and convert to Path object + package_path = Path(os.path.dirname(decoupled_wbc.__file__)) + gear_wbc_config = str(package_path / ".." / wbc_config["GEAR_WBC_CONFIG"]) + if lower_body_policy_type == "gear_wbc": + lower_body_policy = G1GearWbcPolicy( + robot_model=robot_model, + config=gear_wbc_config, + model_path=wbc_config["model_path"], + ) + + wbc_policy = G1DecoupledWholeBodyPolicy( + robot_model=robot_model, + upper_body_policy=upper_body_policy, + lower_body_policy=lower_body_policy, + ) + else: + raise ValueError(f"Invalid robot type: {robot_type}") + return wbc_policy diff --git a/gr00t_wbc/control/robot_model/__init__.py b/decoupled_wbc/control/robot_model/__init__.py similarity index 100% rename from gr00t_wbc/control/robot_model/__init__.py rename to decoupled_wbc/control/robot_model/__init__.py diff --git a/gr00t_wbc/control/robot_model/instantiation/__init__.py b/decoupled_wbc/control/robot_model/instantiation/__init__.py similarity index 100% rename from gr00t_wbc/control/robot_model/instantiation/__init__.py rename to decoupled_wbc/control/robot_model/instantiation/__init__.py diff --git a/decoupled_wbc/control/robot_model/instantiation/g1.py b/decoupled_wbc/control/robot_model/instantiation/g1.py 
import os
from pathlib import Path
from typing import Literal

from decoupled_wbc.control.robot_model.robot_model import RobotModel
from decoupled_wbc.control.robot_model.supplemental_info.g1.g1_supplemental_info import (
    ElbowPose,
    G1SupplementalInfo,
    WaistLocation,
)


def instantiate_g1_robot_model(
    waist_location: Literal["lower_body", "upper_body", "lower_and_upper_body"] = "lower_body",
    high_elbow_pose: bool = False,
):
    """
    Instantiate a G1 robot model with configurable waist location and pose.

    Args:
        waist_location: Whether to put waist in "lower_body" (default G1 behavior),
            "upper_body" (waist controlled with arms/manipulation via IK),
            or "lower_and_upper_body" (waist reference from arms/manipulation
            via IK then passed to lower body policy)
        high_elbow_pose: Whether to use high elbow pose configuration for
            default joint positions

    Returns:
        RobotModel: Configured G1 robot model
    """
    waist_location_map = {
        "lower_body": WaistLocation.LOWER_BODY,
        "upper_body": WaistLocation.UPPER_BODY,
        "lower_and_upper_body": WaistLocation.LOWER_AND_UPPER_BODY,
    }
    assert waist_location in waist_location_map, (
        f"Invalid waist_location: {waist_location}. "
        "Must be 'lower_body' or 'upper_body' or 'lower_and_upper_body'"
    )

    # Repo root is five levels up from this file.
    repo_root = Path(__file__).resolve().parent.parent.parent.parent.parent
    asset_path = os.path.join(repo_root, "decoupled_wbc/control/robot_model/model_data/g1")
    urdf_path = os.path.join(
        repo_root, "decoupled_wbc/control/robot_model/model_data/g1/g1_29dof_with_hand.urdf"
    )

    supplemental_info = G1SupplementalInfo(
        waist_location=waist_location_map[waist_location],
        elbow_pose=ElbowPose.HIGH if high_elbow_pose else ElbowPose.LOW,
    )

    return RobotModel(
        urdf_path,
        asset_path,
        supplemental_info=supplemental_info,
    )
decoupled_wbc/control/robot_model/model_data/g1/g1_29dof_with_hand.xml diff --git a/gr00t_wbc/control/robot_model/model_data/g1/g1_29dof_with_hand_rev_1_0_activatedfinger.xml b/decoupled_wbc/control/robot_model/model_data/g1/g1_29dof_with_hand_rev_1_0_activatedfinger.xml similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/g1_29dof_with_hand_rev_1_0_activatedfinger.xml rename to decoupled_wbc/control/robot_model/model_data/g1/g1_29dof_with_hand_rev_1_0_activatedfinger.xml diff --git a/gr00t_wbc/control/robot_model/model_data/g1/lift_box_43dof.xml b/decoupled_wbc/control/robot_model/model_data/g1/lift_box_43dof.xml similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/lift_box_43dof.xml rename to decoupled_wbc/control/robot_model/model_data/g1/lift_box_43dof.xml diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/head_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/head_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/head_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/head_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_ankle_pitch_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_ankle_pitch_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_ankle_pitch_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_ankle_pitch_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_ankle_roll_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_ankle_roll_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_ankle_roll_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_ankle_roll_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_elbow_link.STL 
b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_elbow_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_elbow_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_elbow_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_elbow_link_merge.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_elbow_link_merge.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_elbow_link_merge.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_elbow_link_merge.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_index_0_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_index_0_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_index_0_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_index_0_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_index_1_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_index_1_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_index_1_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_index_1_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_middle_0_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_middle_0_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_middle_0_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_middle_0_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_middle_1_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_middle_1_link.STL similarity index 100% rename from 
gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_middle_1_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_middle_1_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_palm_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_palm_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_palm_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_palm_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_0_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_0_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_0_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_0_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_1_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_1_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_1_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_1_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_2_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_2_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_2_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hand_thumb_2_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hip_pitch_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hip_pitch_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hip_pitch_link.STL rename to 
decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hip_pitch_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hip_roll_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hip_roll_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hip_roll_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hip_roll_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hip_yaw_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hip_yaw_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_hip_yaw_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_hip_yaw_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_knee_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_knee_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_knee_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_knee_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_rubber_hand.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_rubber_hand.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_rubber_hand.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_rubber_hand.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_pitch_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_pitch_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_pitch_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_pitch_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_roll_link.STL 
b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_roll_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_roll_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_roll_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_yaw_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_yaw_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_yaw_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_shoulder_yaw_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_wrist_pitch_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_wrist_pitch_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_wrist_pitch_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_wrist_pitch_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_wrist_roll_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_wrist_roll_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_wrist_roll_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_wrist_roll_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_wrist_roll_rubber_hand.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_wrist_roll_rubber_hand.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_wrist_roll_rubber_hand.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_wrist_roll_rubber_hand.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/left_wrist_yaw_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/left_wrist_yaw_link.STL similarity 
index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/left_wrist_yaw_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/left_wrist_yaw_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/logo_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/logo_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/logo_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/logo_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/pelvis.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/pelvis.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/pelvis.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/pelvis.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/pelvis_contour_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/pelvis_contour_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/pelvis_contour_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/pelvis_contour_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_ankle_pitch_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_ankle_pitch_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_ankle_pitch_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_ankle_pitch_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_ankle_roll_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_ankle_roll_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_ankle_roll_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_ankle_roll_link.STL diff --git 
a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_elbow_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_elbow_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_elbow_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_elbow_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_elbow_link_merge.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_elbow_link_merge.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_elbow_link_merge.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_elbow_link_merge.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_index_0_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_index_0_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_index_0_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_index_0_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_index_1_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_index_1_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_index_1_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_index_1_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_middle_0_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_middle_0_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_middle_0_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_middle_0_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_middle_1_link.STL 
b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_middle_1_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_middle_1_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_middle_1_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_palm_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_palm_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_palm_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_palm_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_0_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_0_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_0_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_0_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_1_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_1_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_1_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_1_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_2_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_2_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_2_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hand_thumb_2_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hip_pitch_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hip_pitch_link.STL similarity 
index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hip_pitch_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hip_pitch_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hip_roll_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hip_roll_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hip_roll_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hip_roll_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hip_yaw_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hip_yaw_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_hip_yaw_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_hip_yaw_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_knee_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_knee_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_knee_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_knee_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_rubber_hand.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_rubber_hand.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_rubber_hand.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_rubber_hand.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_pitch_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_pitch_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_pitch_link.STL rename to 
decoupled_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_pitch_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_roll_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_roll_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_roll_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_roll_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_yaw_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_yaw_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_yaw_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_shoulder_yaw_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_wrist_pitch_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_wrist_pitch_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_wrist_pitch_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_wrist_pitch_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_wrist_roll_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_wrist_roll_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_wrist_roll_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_wrist_roll_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_wrist_roll_rubber_hand.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_wrist_roll_rubber_hand.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_wrist_roll_rubber_hand.STL rename to 
decoupled_wbc/control/robot_model/model_data/g1/meshes/right_wrist_roll_rubber_hand.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/right_wrist_yaw_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/right_wrist_yaw_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/right_wrist_yaw_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/right_wrist_yaw_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_L_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_L_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_L_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_L_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_L_rod_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_L_rod_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_L_rod_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_L_rod_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_R_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_R_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_R_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_R_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_R_rod_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_R_rod_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_R_rod_link.STL rename to 
decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_constraint_R_rod_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_link_rev_1_0.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_link_rev_1_0.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/torso_link_rev_1_0.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/torso_link_rev_1_0.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_constraint_L.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_constraint_L.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_constraint_L.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_constraint_L.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_constraint_R.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_constraint_R.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_constraint_R.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_constraint_R.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_roll_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_roll_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_roll_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_roll_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_roll_link_rev_1_0.STL 
b/decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_roll_link_rev_1_0.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_roll_link_rev_1_0.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_roll_link_rev_1_0.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_support_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_support_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_support_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_support_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_yaw_link.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_yaw_link.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_yaw_link.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_yaw_link.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_yaw_link_rev_1_0.STL b/decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_yaw_link_rev_1_0.STL similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/meshes/waist_yaw_link_rev_1_0.STL rename to decoupled_wbc/control/robot_model/model_data/g1/meshes/waist_yaw_link_rev_1_0.STL diff --git a/gr00t_wbc/control/robot_model/model_data/g1/pnp_bottle_43dof.xml b/decoupled_wbc/control/robot_model/model_data/g1/pnp_bottle_43dof.xml similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/pnp_bottle_43dof.xml rename to decoupled_wbc/control/robot_model/model_data/g1/pnp_bottle_43dof.xml diff --git a/gr00t_wbc/control/robot_model/model_data/g1/pnp_cube_43dof.xml b/decoupled_wbc/control/robot_model/model_data/g1/pnp_cube_43dof.xml similarity index 100% rename from gr00t_wbc/control/robot_model/model_data/g1/pnp_cube_43dof.xml rename to 
from typing import List, Optional, Set, Union

import numpy as np
import pinocchio as pin

from decoupled_wbc.control.robot_model.supplemental_info import RobotSupplementalInfo


class RobotModel:
    """Thin wrapper around a Pinocchio robot model loaded from URDF.

    Provides joint-name <-> configuration-index bookkeeping, joint limits
    (optionally overridden by supplemental info), forward kinematics,
    gravity-compensation torques, and actuated-joint / joint-group index
    lookups driven by an optional :class:`RobotSupplementalInfo`.
    """

    def __init__(
        self,
        urdf_path,
        asset_path,
        set_floating_base=False,
        supplemental_info: Optional[RobotSupplementalInfo] = None,
    ):
        """Build the Pinocchio model and cache joint bookkeeping.

        :param urdf_path: Path to the robot URDF file.
        :param asset_path: Package directory used to resolve mesh references.
        :param set_floating_base: If True, attach a free-flyer root joint so
            the robot can move in the world (adds 7 configuration DOFs).
        :param supplemental_info: Optional metadata describing actuated
            joints, joint groups, limits, and default poses.
        """
        self.pinocchio_wrapper = pin.RobotWrapper.BuildFromURDF(
            filename=urdf_path,
            package_dirs=[asset_path],
            root_joint=pin.JointModelFreeFlyer() if set_floating_base else None,
        )
        self.is_floating_base_model = set_floating_base

        self.joint_to_dof_index = {}
        # Assume we only have single-dof joints.
        # First two names correspond to universe and floating base joints.
        names = (
            self.pinocchio_wrapper.model.names[2:]
            if set_floating_base
            else self.pinocchio_wrapper.model.names[1:]
        )
        for name in names:
            j_id = self.pinocchio_wrapper.model.getJointId(name)
            jmodel = self.pinocchio_wrapper.model.joints[j_id]
            self.joint_to_dof_index[name] = jmodel.idx_q

        # Store joint limits only for actual joints (excluding floating base).
        # If set_floating_base is true and the robot can move in the world,
        # then we don't want to impose joint limits for the 7 dofs
        # corresponding to the floating base.
        root_nq = 7 if set_floating_base else 0
        self.upper_joint_limits = self.pinocchio_wrapper.model.upperPositionLimit[root_nq:].copy()
        self.lower_joint_limits = self.pinocchio_wrapper.model.lowerPositionLimit[root_nq:].copy()

        # Set up supplemental info if provided.
        self.supplemental_info = supplemental_info
        if self.supplemental_info is not None:
            # Cache indices for body and hand actuated joints separately.
            self._body_actuated_joint_indices = [
                self.dof_index(name) for name in self.supplemental_info.body_actuated_joints
            ]
            self._left_hand_actuated_joint_indices = [
                self.dof_index(name) for name in self.supplemental_info.left_hand_actuated_joints
            ]
            self._right_hand_actuated_joint_indices = [
                self.dof_index(name) for name in self.supplemental_info.right_hand_actuated_joints
            ]
            self._hand_actuated_joint_indices = (
                self._left_hand_actuated_joint_indices + self._right_hand_actuated_joint_indices
            )

            # Cache indices for joint groups, handling nested groups.
            self._joint_group_indices = {}
            for group_name, group_info in self.supplemental_info.joint_groups.items():
                indices = []
                # Add indices for direct joints.
                indices.extend([self.dof_index(name) for name in group_info["joints"]])
                # Add indices from subgroups (relies on subgroups being
                # registered before the groups that reference them).
                for subgroup_name in group_info["groups"]:
                    indices.extend(self.get_joint_group_indices(subgroup_name))
                self._joint_group_indices[group_name] = sorted(set(indices))

            # Update joint limits from supplemental info if available.
            if (
                hasattr(self.supplemental_info, "joint_limits")
                and self.supplemental_info.joint_limits
            ):
                for joint_name, limits in self.supplemental_info.joint_limits.items():
                    if joint_name in self.joint_to_dof_index:
                        idx = self.joint_to_dof_index[joint_name] - root_nq
                        self.lower_joint_limits[idx] = limits[0]
                        self.upper_joint_limits[idx] = limits[1]

        # Initialize default body pose.
        self.default_body_pose = self.q_zero.copy()

        # Update with supplemental info if available.
        if self.supplemental_info is not None:
            default_joint_q = self.supplemental_info.default_joint_q
            for joint, joint_values in default_joint_q.items():
                # Get the joint name mapping for this type.
                joint_mapping = self.supplemental_info.joint_name_mapping[joint]

                # Handle both single joint names and left/right mappings.
                if isinstance(joint_mapping, str):
                    # Single joint (e.g., waist joints).
                    if joint_mapping in self.joint_to_dof_index:
                        joint_idx = self.dof_index(joint_mapping)
                        self.default_body_pose[joint_idx] = (
                            joint_values  # joint_values is the value for single joints
                        )
                else:
                    # Left/right mapping (e.g., arm joints).
                    for side, value in joint_values.items():
                        if side in joint_mapping and joint_mapping[side] in self.joint_to_dof_index:
                            joint_idx = self.dof_index(joint_mapping[side])
                            self.default_body_pose[joint_idx] = value

        # Initialize initial body pose.
        self.initial_body_pose = self.default_body_pose.copy()

    @property
    def num_dofs(self) -> int:
        """Get the number of degrees of freedom of the robot (floating base pose + joints)."""
        return self.pinocchio_wrapper.model.nq

    @property
    def q_zero(self) -> np.ndarray:
        """Get the zero pose of the robot."""
        return self.pinocchio_wrapper.q0

    @property
    def joint_names(self) -> List[str]:
        """Get the names of the joints of the robot."""
        return list(self.joint_to_dof_index.keys())

    @property
    def num_joints(self) -> int:
        """Get the number of joints of the robot."""
        return len(self.joint_to_dof_index)

    def dof_index(self, joint_name: str) -> int:
        """
        Get the index in the degrees of freedom vector corresponding
        to the single-DoF joint with name `joint_name`.

        :raises ValueError: If the joint name is unknown.
        """
        if joint_name not in self.joint_to_dof_index:
            raise ValueError(
                f"Unknown joint name: '{joint_name}'. "
                f"Available joints: {list(self.joint_to_dof_index.keys())}"
            )
        return self.joint_to_dof_index[joint_name]

    def get_body_actuated_joint_indices(self) -> List[int]:
        """
        Get the indices of body actuated joints in the full configuration.
        Ordering is that of the actuated joints as defined in the supplemental info.
        Requires supplemental_info to be provided.
        """
        if self.supplemental_info is None:
            raise ValueError("supplemental_info must be provided to use this method")
        return self._body_actuated_joint_indices

    def get_hand_actuated_joint_indices(self, side: str = "both") -> List[int]:
        """
        Get the indices of hand actuated joints in the full configuration.
        Ordering is that of the actuated joints as defined in the supplemental info.
        Requires supplemental_info to be provided.

        Args:
            side: String specifying which hand to get indices for ('left', 'right', or 'both')
        """
        if self.supplemental_info is None:
            raise ValueError("supplemental_info must be provided to use this method")

        if side.lower() == "both":
            return self._hand_actuated_joint_indices
        elif side.lower() == "left":
            return self._left_hand_actuated_joint_indices
        elif side.lower() == "right":
            return self._right_hand_actuated_joint_indices
        else:
            raise ValueError("side must be 'left', 'right', or 'both'")

    def get_joint_group_indices(self, group_names: Union[str, Set[str]]) -> List[int]:
        """
        Get the indices of joints in one or more groups in the full configuration.
        Requires supplemental_info to be provided.
        The returned indices are sorted in ascending order, so that the joint ordering
        of the full model is preserved.

        Args:
            group_names: Either a single group name (str) or a set of group names (Set[str])

        Returns:
            List of joint indices in sorted order with no duplicates
        """
        if self.supplemental_info is None:
            raise ValueError("supplemental_info must be provided to use this method")

        # Convert single string to set for uniform handling.
        if isinstance(group_names, str):
            group_names = {group_names}

        # Collect indices from all groups.
        all_indices = set()
        for group_name in group_names:
            if group_name not in self._joint_group_indices:
                raise ValueError(f"Unknown joint group: {group_name}")
            all_indices.update(self._joint_group_indices[group_name])

        return sorted(all_indices)

    def cache_forward_kinematics(self, q: np.ndarray, auto_clip=True) -> None:
        """
        Perform forward kinematics to update the pose of every joint and frame
        in the Pinocchio data structures for the given configuration `q`.

        :param q: A numpy array of shape (num_dofs,) representing the robot configuration.
        :param auto_clip: Whether to clip joint values to their limits first.
        """
        if q.shape[0] != self.num_dofs:
            raise ValueError(f"Expected q of length {self.num_dofs}, got {q.shape[0]} instead.")

        # Apply auto-clip if enabled.
        if auto_clip:
            q = self.clip_configuration(q)

        pin.framesForwardKinematics(self.pinocchio_wrapper.model, self.pinocchio_wrapper.data, q)

    def compute_gravity_compensation_torques(
        self, q: np.ndarray, joint_groups: Union[str, List[str], Set[str]] = None, auto_clip=True
    ) -> np.ndarray:
        """
        Compute gravity compensation torques for specified joint groups using pinocchio.

        :param q: Robot configuration (joint positions)
        :param joint_groups: Joint groups to compensate (e.g., "arms", ["left_arm", "waist"],
                             {"left_arm", "waist"}). If None, compensates all joints
        :param auto_clip: Whether to automatically clip joint values to limits
        :return: Array of gravity compensation torques (zero for non-compensated joints).
                 The array lives in the velocity/tangent space, i.e. has length model.nv.
        """
        if q.shape[0] != self.num_dofs:
            raise ValueError(f"Expected q of length {self.num_dofs}, got {q.shape[0]} instead.")

        # Apply auto-clip if enabled.
        if auto_clip:
            q = self.clip_configuration(q)

        try:
            # Cache forward kinematics for the current configuration.
            self.cache_forward_kinematics(q, auto_clip=False)  # Already clipped if needed

            # BUGFIX: velocity/acceleration vectors (and the torques returned
            # by RNEA) live in the tangent space of size model.nv, NOT in the
            # configuration space of size model.nq. For a floating-base model
            # nq = nv + 1 (quaternion), so sizing these with num_dofs made
            # pin.rnea fail. For fixed-base single-DoF-joint models nq == nv,
            # which is why this previously went unnoticed.
            nv = self.pinocchio_wrapper.model.nv
            v = np.zeros(nv)
            a = np.zeros(nv)

            # RNEA with zero velocity/acceleration yields the gravity vector.
            gravity_torques_full = pin.rnea(
                self.pinocchio_wrapper.model, self.pinocchio_wrapper.data, q, v, a
            )

            # If no joint groups specified, return full gravity torques.
            if joint_groups is None:
                return gravity_torques_full

            # Convert list to set for get_joint_group_indices compatibility.
            if isinstance(joint_groups, list):
                joint_groups = set(joint_groups)

            # Get joint indices for specified groups - handles str and Set[str].
            try:
                compensated_joint_indices = self.get_joint_group_indices(joint_groups)
            except ValueError as e:
                raise ValueError(f"Error resolving joint groups {joint_groups}: {e}") from e

            # Create mask for joints that should receive gravity compensation.
            # NOTE(review): the cached indices are idx_q values; for fixed-base
            # single-DoF-joint models idx_q == idx_v so they index the torque
            # vector correctly. For floating-base models idx_q and idx_v differ
            # by one — confirm before using group filtering with a free-flyer.
            compensation_mask = np.zeros(nv, dtype=bool)
            for joint_idx in compensated_joint_indices:
                if 0 <= joint_idx < len(compensation_mask):
                    compensation_mask[joint_idx] = True

            # Apply mask to only compensate specified joints.
            compensated_torques = np.zeros_like(gravity_torques_full)
            compensated_torques[compensation_mask] = gravity_torques_full[compensation_mask]

            return compensated_torques

        except Exception as e:
            # Chain the original exception so the Pinocchio traceback survives.
            raise RuntimeError(f"Error computing gravity compensation: {e}") from e

    def clip_configuration(self, q: np.ndarray, margin: float = 1e-6) -> np.ndarray:
        """
        Clip the configuration to stay within joint limits with a small tolerance.

        :param q: Configuration to clip
        :param margin: Tolerance to keep away from joint limits
        :return: Clipped configuration
        """
        q_clipped = q.copy()

        # Only clip joint positions, not floating base.
        root_nq = 7 if self.is_floating_base_model else 0
        q_clipped[root_nq:] = np.clip(
            q[root_nq:], self.lower_joint_limits + margin, self.upper_joint_limits - margin
        )

        return q_clipped

    def frame_placement(self, frame_name: str) -> pin.SE3:
        """
        Returns the SE3 transform of the specified frame in the world coordinate system.
        Note: make sure cache_forward_kinematics() has been previously called.

        :param frame_name: Name of the frame, e.g. "link_elbow_frame", "hand_imu_frame", etc.
        :return: A pin.SE3 object representing the pose of the frame.
        """
        model = self.pinocchio_wrapper.model
        data = self.pinocchio_wrapper.data

        # getFrameId returns len(model.frames) for unknown names, so the
        # upper-bound check doubles as an existence check.
        frame_id = model.getFrameId(frame_name)
        if frame_id < 0 or frame_id >= len(model.frames):
            valid_frames = [f.name for f in model.frames]
            raise ValueError(f"Unknown frame '{frame_name}'. Valid frames: {valid_frames}")

        # Pinocchio's data.oMf[frame_id] is a pin.SE3.
        return data.oMf[frame_id].copy()

    def get_body_actuated_joints(self, q: np.ndarray) -> np.ndarray:
        """
        Get the configuration of body actuated joints from a full configuration.

        :param q: Configuration in full space
        :return: Configuration of body actuated joints
        """
        indices = self.get_body_actuated_joint_indices()

        return q[indices]

    def get_hand_actuated_joints(self, q: np.ndarray, side: str = "both") -> np.ndarray:
        """
        Get the configuration of hand actuated joints from a full configuration.

        Args:
            q: Configuration in full space
            side: String specifying which hand to get joints for ('left', 'right', or 'both')
        """
        indices = self.get_hand_actuated_joint_indices(side)
        return q[indices]

    def get_configuration_from_actuated_joints(
        self,
        body_actuated_joint_values: np.ndarray,
        hand_actuated_joint_values: Optional[np.ndarray] = None,
        left_hand_actuated_joint_values: Optional[np.ndarray] = None,
        right_hand_actuated_joint_values: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """
        Get the full configuration from the body and hand actuated joint configurations.
        Can specify either both hands together or left and right hands separately.

        Args:
            body_actuated_joint_values: Configuration of body actuated joints
            hand_actuated_joint_values: Configuration of both hands' actuated joints (optional)
            left_hand_actuated_joint_values: Configuration of left hand actuated joints (optional)
            right_hand_actuated_joint_values: Configuration of right hand actuated joints (optional)

        Returns:
            Full configuration including body and hand joints
        """
        q = self.pinocchio_wrapper.q0.copy()
        q[self.get_body_actuated_joint_indices()] = body_actuated_joint_values

        # Handle hand configurations.
        if hand_actuated_joint_values is not None:
            # Use combined hand configuration.
            q[self.get_hand_actuated_joint_indices("both")] = hand_actuated_joint_values
        else:
            # Use separate hand configurations.
            if left_hand_actuated_joint_values is not None:
                q[self.get_hand_actuated_joint_indices("left")] = left_hand_actuated_joint_values
            if right_hand_actuated_joint_values is not None:
                q[self.get_hand_actuated_joint_indices("right")] = right_hand_actuated_joint_values

        return q

    def reset_forward_kinematics(self) -> None:
        """
        Reset the forward kinematics to the initial configuration.
        """
        self.cache_forward_kinematics(self.q_zero)

    def get_initial_upper_body_pose(self) -> np.ndarray:
        """
        Get the initial upper body pose of the robot.
        """
        return self.initial_body_pose[self.get_joint_group_indices("upper_body")]

    def get_default_body_pose(self) -> np.ndarray:
        """
        Get the default body pose of the robot.
        """
        return self.default_body_pose

    def set_initial_body_pose(self, q: np.ndarray, q_idx=None) -> None:
        """
        Set the initial body pose of the robot.

        :param q: New pose values (full pose if q_idx is None, else just the
                  values for the given indices).
        :param q_idx: Optional indices into the pose vector to update in place.
        """
        if q_idx is None:
            self.initial_body_pose = q
        else:
            self.initial_body_pose[q_idx] = q
+ """ + self.full_robot = full_robot_model + self.supplemental_info = full_robot_model.supplemental_info + + # If fixed_values is None, use q0 from the full robot model + if fixed_values is None: + fixed_values = [] + for joint_name in fixed_joints: + full_idx = full_robot_model.dof_index(joint_name) + fixed_values.append(full_robot_model.pinocchio_wrapper.q0[full_idx]) + elif len(fixed_joints) != len(fixed_values): + raise ValueError("fixed_joints and fixed_values must have the same length") + + # Store fixed joints and their values + self.fixed_joints = fixed_joints + self.fixed_values = fixed_values + + # Create mapping between reduced and full state spaces + self.reduced_to_full = [] + self.full_to_reduced = {} + + # Initialize with floating base indices if present + if full_robot_model.is_floating_base_model: + self.reduced_to_full.extend(range(7)) # Floating base indices + for i in range(7): + self.full_to_reduced[i] = i + + # Add active joint indices + for joint_name in full_robot_model.joint_names: + if joint_name not in fixed_joints: + full_idx = full_robot_model.dof_index(joint_name) + reduced_idx = len(self.reduced_to_full) + self.reduced_to_full.append(full_idx) + self.full_to_reduced[full_idx] = reduced_idx + + # Create a reduced Pinocchio model using buildReducedModel + # First, get the list of joint IDs to lock + locked_joint_ids = [] + for joint_name in fixed_joints: + joint_id = full_robot_model.pinocchio_wrapper.model.getJointId(joint_name) + if (full_robot_model.is_floating_base_model and joint_id > 1) or ( + not full_robot_model.is_floating_base_model and joint_id > 0 + ): + locked_joint_ids.append(joint_id) + + # First build the reduced kinematic model + reduced_model = pin.buildReducedModel( + full_robot_model.pinocchio_wrapper.model, + locked_joint_ids, + full_robot_model.pinocchio_wrapper.q0, + ) + + # Then build the reduced geometry models using the reduced kinematic model + self.pinocchio_wrapper = pin.RobotWrapper( + model=reduced_model, 
+ ) + + # Create joint to dof index mapping + self.joint_to_dof_index = {} + # Assume we only have single-dof joints + # First two names correspond to universe and floating base joints + names = ( + self.pinocchio_wrapper.model.names[2:] + if self.full_robot.is_floating_base_model + else self.pinocchio_wrapper.model.names[1:] + ) + for name in names: + j_id = self.pinocchio_wrapper.model.getJointId(name) + jmodel = self.pinocchio_wrapper.model.joints[j_id] + self.joint_to_dof_index[name] = jmodel.idx_q + + # Initialize joint limits + root_nq = 7 if self.full_robot.is_floating_base_model else 0 + self.lower_joint_limits = self.pinocchio_wrapper.model.lowerPositionLimit[root_nq:].copy() + self.upper_joint_limits = self.pinocchio_wrapper.model.upperPositionLimit[root_nq:].copy() + + # Update joint limits from supplemental info if available + if self.supplemental_info is not None: + if ( + hasattr(self.supplemental_info, "joint_limits") + and self.supplemental_info.joint_limits + ): + for joint_name, limits in self.supplemental_info.joint_limits.items(): + if joint_name in self.joint_to_dof_index: + idx = self.joint_to_dof_index[joint_name] - root_nq + self.lower_joint_limits[idx] = limits[0] + self.upper_joint_limits[idx] = limits[1] + + # Get full indices for body and hand actuated joints + full_body_indices = full_robot_model.get_body_actuated_joint_indices() + full_hand_indices = full_robot_model.get_hand_actuated_joint_indices("both") + full_left_hand_indices = full_robot_model.get_hand_actuated_joint_indices("left") + full_right_hand_indices = full_robot_model.get_hand_actuated_joint_indices("right") + + # Map to reduced indices + self._body_actuated_joint_indices = [] + for idx in full_body_indices: + if idx in self.full_to_reduced: + self._body_actuated_joint_indices.append(self.full_to_reduced[idx]) + + self._hand_actuated_joint_indices = [] + for idx in full_hand_indices: + if idx in self.full_to_reduced: + 
self._hand_actuated_joint_indices.append(self.full_to_reduced[idx]) + + self._left_hand_actuated_joint_indices = [] + for idx in full_left_hand_indices: + if idx in self.full_to_reduced: + self._left_hand_actuated_joint_indices.append(self.full_to_reduced[idx]) + + self._right_hand_actuated_joint_indices = [] + for idx in full_right_hand_indices: + if idx in self.full_to_reduced: + self._right_hand_actuated_joint_indices.append(self.full_to_reduced[idx]) + + # Cache indices for joint groups in reduced space + self._joint_group_indices = {} + for group_name in self.supplemental_info.joint_groups: + full_indices = full_robot_model.get_joint_group_indices(group_name) + reduced_indices = [] + for idx in full_indices: + if idx in self.full_to_reduced: + reduced_indices.append(self.full_to_reduced[idx]) + self._joint_group_indices[group_name] = sorted(set(reduced_indices)) + + # Initialize default body pose in reduced space + self.default_body_pose = self.full_to_reduced_configuration( + full_robot_model.default_body_pose + ) + + # Initialize initial body pose in reduced space + self.initial_body_pose = self.full_to_reduced_configuration( + full_robot_model.initial_body_pose + ) + + @property + def num_joints(self) -> int: + """Get the number of active joints in the reduced model.""" + return len(self.joint_names) + + @property + def joint_names(self) -> List[str]: + """Get the names of the active joints in the reduced model.""" + return [name for name in self.full_robot.joint_names if name not in self.fixed_joints] + + @classmethod + def from_fixed_groups( + cls, + full_robot_model: RobotModel, + fixed_group_names: List[str], + fixed_values: Optional[List[float]] = None, + ) -> "ReducedRobotModel": + """ + Create a reduced order robot model by fixing all joints in specified groups. + + :param full_robot_model: The original robot model + :param fixed_group_names: List of joint group names to fix + :param fixed_values: Optional list of values to fix the joints to. 
If None, uses the initial + joint positions (q0) from the full robot model. + :return: A ReducedRobotModel instance + """ + if full_robot_model.supplemental_info is None: + raise ValueError("supplemental_info must be provided to use this method") + + # Get all joints in the groups, including those from subgroups + fixed_joints = set() # Use a set to avoid duplicates + + for group_name in fixed_group_names: + if group_name not in full_robot_model.supplemental_info.joint_groups: + raise ValueError(f"Unknown joint group: {group_name}") + + group_info = full_robot_model.supplemental_info.joint_groups[group_name] + + # Add direct joints + fixed_joints.update(group_info["joints"]) + + # Add joints from subgroups + for subgroup_name in group_info["groups"]: + subgroup_joints = full_robot_model.get_joint_group_indices(subgroup_name) + fixed_joints.update([full_robot_model.joint_names[idx] for idx in subgroup_joints]) + + # Convert set back to list for compatibility with the original constructor + return cls(full_robot_model, list(fixed_joints), fixed_values) + + @classmethod + def from_fixed_group( + cls, + full_robot_model: RobotModel, + fixed_group_name: str, + fixed_values: Optional[List[float]] = None, + ) -> "ReducedRobotModel": + """ + Create a reduced order robot model by fixing all joints in a specified group. + This is a convenience method that calls from_fixed_groups with a single group. + + :param full_robot_model: The original robot model + :param fixed_group_name: Name of the joint group to fix + :param fixed_values: Optional list of values to fix the joints to. If None, uses the initial + joint positions (q0) from the full robot model. 
+ :return: A ReducedRobotModel instance + """ + return cls.from_fixed_groups(full_robot_model, [fixed_group_name], fixed_values) + + @classmethod + def from_active_group( + cls, + full_robot_model: RobotModel, + active_group_name: str, + fixed_values: Optional[List[float]] = None, + ) -> "ReducedRobotModel": + """ + Create a reduced order robot model by fixing all joints EXCEPT those in the specified group. + This is a convenience method that calls from_active_groups with a single group. + + :param full_robot_model: The original robot model + :param active_group_name: Name of the joint group to keep active (all other joints will be fixed) + :param fixed_values: Optional list of values to fix the joints to. If None, uses the initial + joint positions (q0) from the full robot model. + :return: A ReducedRobotModel instance + """ + return cls.from_active_groups(full_robot_model, [active_group_name], fixed_values) + + @classmethod + def from_active_groups( + cls, + full_robot_model: RobotModel, + active_group_names: List[str], + fixed_values: Optional[List[float]] = None, + ) -> "ReducedRobotModel": + """ + Create a reduced order robot model by fixing all joints EXCEPT those in the specified groups. + This is useful when you want to keep multiple groups active and fix everything else. + + :param full_robot_model: The original robot model + :param active_group_names: List of joint group names to keep active (all other joints will be fixed) + :param fixed_values: Optional list of values to fix the joints to. If None, uses the initial + joint positions (q0) from the full robot model. 
+ :return: A ReducedRobotModel instance + """ + if full_robot_model.supplemental_info is None: + raise ValueError("supplemental_info must be provided to use this method") + + # Get all joints in the active groups, including those from subgroups + active_joints = set() + + def add_group_joints(group_name: str): + if group_name not in full_robot_model.supplemental_info.joint_groups: + raise ValueError(f"Unknown joint group: {group_name}") + + group_info = full_robot_model.supplemental_info.joint_groups[group_name] + + # Add direct joints + if "joints" in group_info: + active_joints.update(group_info["joints"]) + + # Add joints from subgroups + if "groups" in group_info: + for subgroup_name in group_info["groups"]: + add_group_joints(subgroup_name) + + for group_name in active_group_names: + add_group_joints(group_name) + + # Get all joints from the model + all_joints = set(full_robot_model.joint_names) + + # The fixed joints are all joints minus the active joints + fixed_joints = list(all_joints - active_joints) + + return cls(full_robot_model, fixed_joints, fixed_values) + + def reduced_to_full_configuration(self, q_reduced: np.ndarray) -> np.ndarray: + """ + Convert a reduced configuration to the full configuration space. 
+ + :param q_reduced: Configuration in reduced space + :return: Configuration in full space with fixed joints set to their fixed values + """ + if q_reduced.shape[0] != self.num_dofs: + raise ValueError( + f"Expected q_reduced of length {self.num_dofs}, got {q_reduced.shape[0]} instead" + ) + + q_full = np.zeros(self.full_robot.num_dofs) + + # Set active joints + for reduced_idx, full_idx in enumerate(self.reduced_to_full): + q_full[full_idx] = q_reduced[reduced_idx] + + # Set fixed joints + for joint_name, value in zip(self.fixed_joints, self.fixed_values): + full_idx = self.full_robot.dof_index(joint_name) + q_full[full_idx] = value + + return q_full + + def full_to_reduced_configuration(self, q_full: np.ndarray) -> np.ndarray: + """ + Convert a full configuration to the reduced configuration space. + + :param q_full: Configuration in full space + :return: Configuration in reduced space + """ + if q_full.shape[0] != self.full_robot.num_dofs: + raise ValueError( + f"Expected q_full of length {self.full_robot.num_dofs}, got {q_full.shape[0]} instead" + ) + + q_reduced = np.zeros(self.num_dofs) + + # Copy active joints + for reduced_idx, full_idx in enumerate(self.reduced_to_full): + q_reduced[reduced_idx] = q_full[full_idx] + + return q_reduced + + def cache_forward_kinematics(self, q_reduced: np.ndarray, auto_clip=True) -> None: + """ + Perform forward kinematics using the reduced configuration. + + :param q_reduced: Configuration in reduced space + """ + # First update the full robot's forward kinematics + q_full = self.reduced_to_full_configuration(q_reduced) + self.full_robot.cache_forward_kinematics(q_full, auto_clip) + + # Then update the reduced model's forward kinematics + pin.framesForwardKinematics( + self.pinocchio_wrapper.model, self.pinocchio_wrapper.data, q_reduced + ) + + def clip_configuration(self, q_reduced: np.ndarray, margin: float = 1e-6) -> np.ndarray: + """ + Clip the reduced configuration to stay within joint limits with a small tolerance. 
+ + :param q_reduced: Configuration to clip + :param margin: Tolerance to keep away from joint limits + :return: Clipped configuration + """ + q_full = self.reduced_to_full_configuration(q_reduced) + q_full_clipped = self.full_robot.clip_configuration(q_full, margin) + return self.full_to_reduced_configuration(q_full_clipped) + + def reset_forward_kinematics(self): + """ + Reset the forward kinematics to the initial configuration. + """ + # Reset full robot's forward kinematics + self.full_robot.reset_forward_kinematics() + # Reset reduced model's forward kinematics + self.cache_forward_kinematics(self.q_zero) diff --git a/decoupled_wbc/control/robot_model/supplemental_info/__init__.py b/decoupled_wbc/control/robot_model/supplemental_info/__init__.py new file mode 100644 index 0000000..4d2e1e6 --- /dev/null +++ b/decoupled_wbc/control/robot_model/supplemental_info/__init__.py @@ -0,0 +1,5 @@ +from decoupled_wbc.control.robot_model.supplemental_info.robot_supplemental_info import ( + RobotSupplementalInfo, +) + +__all__ = ["RobotSupplementalInfo"] diff --git a/gr00t_wbc/control/robot_model/supplemental_info/g1/__init__.py b/decoupled_wbc/control/robot_model/supplemental_info/g1/__init__.py similarity index 100% rename from gr00t_wbc/control/robot_model/supplemental_info/g1/__init__.py rename to decoupled_wbc/control/robot_model/supplemental_info/g1/__init__.py diff --git a/decoupled_wbc/control/robot_model/supplemental_info/g1/g1_supplemental_info.py b/decoupled_wbc/control/robot_model/supplemental_info/g1/g1_supplemental_info.py new file mode 100644 index 0000000..da7dc65 --- /dev/null +++ b/decoupled_wbc/control/robot_model/supplemental_info/g1/g1_supplemental_info.py @@ -0,0 +1,330 @@ +from dataclasses import dataclass +from enum import Enum + +import numpy as np + +from decoupled_wbc.control.robot_model.supplemental_info.robot_supplemental_info import ( + RobotSupplementalInfo, +) + + +class WaistLocation(Enum): + """Enum for waist location configuration.""" + + 
    # Waist joints counted as part of the lower body (default)
    LOWER_BODY = "lower_body"
    # Waist joints counted as part of the upper body
    UPPER_BODY = "upper_body"
    # Waist joints shared by both the lower- and upper-body groups
    LOWER_AND_UPPER_BODY = "lower_and_upper_body"


class ElbowPose(Enum):
    """Enum for elbow pose configuration."""

    LOW = "low"
    HIGH = "high"


@dataclass
class G1SupplementalInfo(RobotSupplementalInfo):
    """
    Supplemental information for the G1 robot.

    Args:
        waist_location: Where to place waist joints in the joint groups
        elbow_pose: Which elbow pose configuration to use for default joint positions
    """

    def __init__(
        self,
        waist_location: WaistLocation = WaistLocation.LOWER_BODY,
        elbow_pose: ElbowPose = ElbowPose.LOW,
    ):
        """Build the hard-coded supplemental description of the G1 robot
        (G1 body + three-finger hands) and forward it to the base class.

        :param waist_location: controls whether the "waist" group is listed under
            lower_body, upper_body_no_hands, or both (see bottom of this method)
        :param elbow_pose: selects which default_joint_q preset is used
        """
        # Model identifier: G1 body paired with the three-finger hand
        name = "G1_G1ThreeFinger"

        # Define all actuated joints
        body_actuated_joints = [
            # Left leg
            "left_hip_pitch_joint",
            "left_hip_roll_joint",
            "left_hip_yaw_joint",
            "left_knee_joint",
            "left_ankle_pitch_joint",
            "left_ankle_roll_joint",
            # Right leg
            "right_hip_pitch_joint",
            "right_hip_roll_joint",
            "right_hip_yaw_joint",
            "right_knee_joint",
            "right_ankle_pitch_joint",
            "right_ankle_roll_joint",
            # Waist
            "waist_yaw_joint",
            "waist_roll_joint",
            "waist_pitch_joint",
            # Left arm
            "left_shoulder_pitch_joint",
            "left_shoulder_roll_joint",
            "left_shoulder_yaw_joint",
            "left_elbow_joint",
            "left_wrist_roll_joint",
            "left_wrist_pitch_joint",
            "left_wrist_yaw_joint",
            # Right arm
            "right_shoulder_pitch_joint",
            "right_shoulder_roll_joint",
            "right_shoulder_yaw_joint",
            "right_elbow_joint",
            "right_wrist_roll_joint",
            "right_wrist_pitch_joint",
            "right_wrist_yaw_joint",
        ]

        left_hand_actuated_joints = [
            # Left hand
            "left_hand_thumb_0_joint",
            "left_hand_thumb_1_joint",
            "left_hand_thumb_2_joint",
            "left_hand_index_0_joint",
            "left_hand_index_1_joint",
            "left_hand_middle_0_joint",
            "left_hand_middle_1_joint",
        ]

        right_hand_actuated_joints = [
            # Right hand
            "right_hand_thumb_0_joint",
            "right_hand_thumb_1_joint",
            "right_hand_thumb_2_joint",
            "right_hand_index_0_joint",
            "right_hand_index_1_joint",
            "right_hand_middle_0_joint",
            "right_hand_middle_1_joint",
        ]

        # Define joint limits from URDF
        # Values are [lower, upper] in radians.
        # NOTE(review): left/right pairs are mostly mirrored (e.g. hip_roll,
        # shoulder_roll), but left_shoulder_roll's lower bound is +0.19 —
        # verify against the URDF that this asymmetry is intentional.
        joint_limits = {
            # Left leg
            "left_hip_pitch_joint": [-2.5307, 2.8798],
            "left_hip_roll_joint": [-0.5236, 2.9671],
            "left_hip_yaw_joint": [-2.7576, 2.7576],
            "left_knee_joint": [-0.087267, 2.8798],
            "left_ankle_pitch_joint": [-0.87267, 0.5236],
            "left_ankle_roll_joint": [-0.2618, 0.2618],
            # Right leg
            "right_hip_pitch_joint": [-2.5307, 2.8798],
            "right_hip_roll_joint": [-2.9671, 0.5236],
            "right_hip_yaw_joint": [-2.7576, 2.7576],
            "right_knee_joint": [-0.087267, 2.8798],
            "right_ankle_pitch_joint": [-0.87267, 0.5236],
            "right_ankle_roll_joint": [-0.2618, 0.2618],
            # Waist
            "waist_yaw_joint": [-2.618, 2.618],
            "waist_roll_joint": [-0.52, 0.52],
            "waist_pitch_joint": [-0.52, 0.52],
            # Left arm
            "left_shoulder_pitch_joint": [-3.0892, 2.6704],
            "left_shoulder_roll_joint": [0.19, 2.2515],
            "left_shoulder_yaw_joint": [-2.618, 2.618],
            "left_elbow_joint": [-1.0472, 2.0944],
            "left_wrist_roll_joint": [-1.972222054, 1.972222054],
            "left_wrist_pitch_joint": [-1.614429558, 1.614429558],
            "left_wrist_yaw_joint": [-1.614429558, 1.614429558],
            # Right arm
            "right_shoulder_pitch_joint": [-3.0892, 2.6704],
            "right_shoulder_roll_joint": [-2.2515, -0.19],
            "right_shoulder_yaw_joint": [-2.618, 2.618],
            "right_elbow_joint": [-1.0472, 2.0944],
            "right_wrist_roll_joint": [-1.972222054, 1.972222054],
            "right_wrist_pitch_joint": [-1.614429558, 1.614429558],
            "right_wrist_yaw_joint": [-1.614429558, 1.614429558],
            # Left hand
            "left_hand_thumb_0_joint": [-1.04719755, 1.04719755],
            "left_hand_thumb_1_joint": [-0.72431163, 1.04719755],
            "left_hand_thumb_2_joint": [0, 1.74532925],
            "left_hand_index_0_joint": [-1.57079632, 0],
            "left_hand_index_1_joint": [-1.74532925, 0],
            "left_hand_middle_0_joint": [-1.57079632, 0],
            "left_hand_middle_1_joint": [-1.74532925, 0],
            # Right hand
            "right_hand_thumb_0_joint": [-1.04719755, 1.04719755],
            "right_hand_thumb_1_joint": [-0.72431163, 1.04719755],
            "right_hand_thumb_2_joint": [0, 1.74532925],
            "right_hand_index_0_joint": [-1.57079632, 0],
            "right_hand_index_1_joint": [-1.74532925, 0],
            "right_hand_middle_0_joint": [-1.57079632, 0],
            "right_hand_middle_1_joint": [-1.74532925, 0],
        }

        # Define joint groups
        # Each entry lists its direct "joints" plus nested sub-"groups".
        joint_groups = {
            # Body groups
            "waist": {
                "joints": ["waist_yaw_joint", "waist_roll_joint", "waist_pitch_joint"],
                "groups": [],
            },
            # Leg groups
            "left_leg": {
                "joints": [
                    "left_hip_pitch_joint",
                    "left_hip_roll_joint",
                    "left_hip_yaw_joint",
                    "left_knee_joint",
                    "left_ankle_pitch_joint",
                    "left_ankle_roll_joint",
                ],
                "groups": [],
            },
            "right_leg": {
                "joints": [
                    "right_hip_pitch_joint",
                    "right_hip_roll_joint",
                    "right_hip_yaw_joint",
                    "right_knee_joint",
                    "right_ankle_pitch_joint",
                    "right_ankle_roll_joint",
                ],
                "groups": [],
            },
            "legs": {"joints": [], "groups": ["left_leg", "right_leg"]},
            # Arm groups
            "left_arm": {
                "joints": [
                    "left_shoulder_pitch_joint",
                    "left_shoulder_roll_joint",
                    "left_shoulder_yaw_joint",
                    "left_elbow_joint",
                    "left_wrist_roll_joint",
                    "left_wrist_pitch_joint",
                    "left_wrist_yaw_joint",
                ],
                "groups": [],
            },
            "right_arm": {
                "joints": [
                    "right_shoulder_pitch_joint",
                    "right_shoulder_roll_joint",
                    "right_shoulder_yaw_joint",
                    "right_elbow_joint",
                    "right_wrist_roll_joint",
                    "right_wrist_pitch_joint",
                    "right_wrist_yaw_joint",
                ],
                "groups": [],
            },
            "arms": {"joints": [], "groups": ["left_arm", "right_arm"]},
            # Hand groups
            "left_hand": {
                "joints": [
                    "left_hand_index_0_joint",
                    "left_hand_index_1_joint",
                    "left_hand_middle_0_joint",
                    "left_hand_middle_1_joint",
                    "left_hand_thumb_0_joint",
                    "left_hand_thumb_1_joint",
                    "left_hand_thumb_2_joint",
                ],
                "groups": [],
            },
            "right_hand": {
                "joints": [
                    "right_hand_index_0_joint",
                    "right_hand_index_1_joint",
                    "right_hand_middle_0_joint",
                    "right_hand_middle_1_joint",
                    "right_hand_thumb_0_joint",
                    "right_hand_thumb_1_joint",
                    "right_hand_thumb_2_joint",
                ],
                "groups": [],
            },
            "hands": {"joints": [], "groups": ["left_hand", "right_hand"]},
            # Full body groups
            "lower_body": {"joints": [], "groups": ["waist", "legs"]},
            "upper_body_no_hands": {"joints": [], "groups": ["arms"]},
            "body": {"joints": [], "groups": ["lower_body", "upper_body_no_hands"]},
            "upper_body": {"joints": [], "groups": ["upper_body_no_hands", "hands"]},
        }

        # Define joint name mapping from generic types to robot-specific names
        # A value is either a single joint name or a {"left": ..., "right": ...} pair.
        joint_name_mapping = {
            # Waist joints
            "waist_pitch": "waist_pitch_joint",
            "waist_roll": "waist_roll_joint",
            "waist_yaw": "waist_yaw_joint",
            # Shoulder joints
            "shoulder_pitch": {
                "left": "left_shoulder_pitch_joint",
                "right": "right_shoulder_pitch_joint",
            },
            "shoulder_roll": {
                "left": "left_shoulder_roll_joint",
                "right": "right_shoulder_roll_joint",
            },
            "shoulder_yaw": {
                "left": "left_shoulder_yaw_joint",
                "right": "right_shoulder_yaw_joint",
            },
            # Elbow joints
            "elbow_pitch": {"left": "left_elbow_joint", "right": "right_elbow_joint"},
            # Wrist joints
            "wrist_pitch": {"left": "left_wrist_pitch_joint", "right": "right_wrist_pitch_joint"},
            "wrist_roll": {"left": "left_wrist_roll_joint", "right": "right_wrist_roll_joint"},
            "wrist_yaw": {"left": "left_wrist_yaw_joint", "right": "right_wrist_yaw_joint"},
        }

        root_frame_name = "pelvis"

        hand_frame_names = {"left": "left_wrist_yaw_link", "right": "right_wrist_yaw_link"}

        elbow_calibration_joint_angles = {"left": 0.0, "right": 0.0}

        # 90-degree rotation about the y-axis (maps x -> -z, z -> x)
        hand_rotation_correction = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])

        # Configure default joint positions based on elbow pose
        if elbow_pose == ElbowPose.HIGH:
            default_joint_q = {
                "shoulder_roll": {"left": 0.5, "right": -0.5},
                "shoulder_pitch": {"left": -0.2, "right": -0.2},
                "shoulder_yaw": {"left": -0.5, "right": 0.5},
                "wrist_roll": {"left": -0.5, "right": 0.5},
                "wrist_yaw": {"left": 0.5, "right": -0.5},
                "wrist_pitch": {"left": -0.2, "right": -0.2},
            }
        else:  # ElbowPose.LOW
            default_joint_q = {
                "shoulder_roll": {"left": 0.2, "right": -0.2},
            }

        teleop_upper_body_motion_scale = 1.0

        # Configure joint groups based on waist location.
        # A shallow copy is sufficient here: the branches below reassign whole
        # entries rather than mutating the nested dicts shared with joint_groups.
        modified_joint_groups = joint_groups.copy()
        if waist_location == WaistLocation.UPPER_BODY:
            # Move waist from lower_body to upper_body_no_hands
            modified_joint_groups["lower_body"] = {"joints": [], "groups": ["legs"]}
            modified_joint_groups["upper_body_no_hands"] = {
                "joints": [],
                "groups": ["arms", "waist"],
            }
        elif waist_location == WaistLocation.LOWER_AND_UPPER_BODY:
            # Add waist to upper_body_no_hands while keeping it in lower_body
            modified_joint_groups["upper_body_no_hands"] = {
                "joints": [],
                "groups": ["arms", "waist"],
            }
        # For LOWER_BODY, keep default joint_groups as is

        super().__init__(
            name=name,
            body_actuated_joints=body_actuated_joints,
            left_hand_actuated_joints=left_hand_actuated_joints,
            right_hand_actuated_joints=right_hand_actuated_joints,
            joint_limits=joint_limits,
            joint_groups=modified_joint_groups,
            root_frame_name=root_frame_name,
            hand_frame_names=hand_frame_names,
            elbow_calibration_joint_angles=elbow_calibration_joint_angles,
            joint_name_mapping=joint_name_mapping,
            hand_rotation_correction=hand_rotation_correction,
            default_joint_q=default_joint_q,
            teleop_upper_body_motion_scale=teleop_upper_body_motion_scale,
        )
def read_qr_code(data):
    """Estimate per-camera latency by decoding a timestamp QR code from each image.

    Each camera view is expected to show a QR code whose payload is a
    ``time.monotonic()`` timestamp; the difference to "now" is the end-to-end
    latency for that stream.

    :param data: message dict with an ``"images"`` mapping of view name -> image
    """
    current_time = time.monotonic()
    detector = cv2.QRCodeDetector()
    for key, img in data["images"].items():
        decoded_time, bbox, _ = detector.detectAndDecode(img)
        if bbox is None or not decoded_time:
            print(f"{key} QR code not detected.")
            continue
        try:
            latency_ms = (current_time - float(decoded_time)) * 1e3
        except ValueError:
            # The QR payload was not a numeric timestamp; previously this raised
            # and would crash the server loop. Report it instead.
            print(f"{key} QR payload was not a timestamp: {decoded_time!r}")
            continue
        print(f"{key} latency: {latency_ms:.1f} ms")
class ComposedCameraSensor(Sensor, SensorServer):
    """Aggregates several physical cameras behind a single Sensor interface.

    One worker thread is started per configured camera; each thread pushes
    frames into a bounded per-camera queue, and read() drains the freshest
    frame from every queue.
    """

    def __init__(self, config: ComposedCameraConfig):
        """Start one capture worker per configured camera and, optionally, the sensor server.

        :param config: which cameras to open, their device ids, queue sizes, and
            server settings
        """
        self.config = config
        self.camera_queues: Dict[str, queue.Queue] = {}
        self.camera_threads: Dict[str, threading.Thread] = {}
        self.shutdown_events: Dict[str, threading.Event] = {}
        self.error_events: Dict[str, threading.Event] = {}
        self.error_messages: Dict[str, str] = {}
        self._observation_spaces: Dict[str, Any] = {}

        camera_configs = self._get_camera_configs()

        # Create one worker thread per camera
        for mount_position, camera_config in camera_configs.items():
            # Per-camera frame queue plus shutdown/error signalling primitives
            camera_queue = queue.Queue(maxsize=config.queue_size)
            shutdown_event = threading.Event()
            error_event = threading.Event()

            self.camera_queues[mount_position] = camera_queue
            self.shutdown_events[mount_position] = shutdown_event
            self.error_events[mount_position] = error_event

            # Start camera thread
            thread = threading.Thread(
                target=self._camera_worker_wrapper,
                args=(
                    mount_position,
                    camera_config["camera_type"],
                    camera_config["device_id"],
                    camera_queue,
                    shutdown_event,
                    error_event,
                ),
            )
            thread.start()
            self.camera_threads[mount_position] = thread

        if config.run_as_server:
            self.start_server(config.port)

    def _get_camera_configs(self) -> Dict[str, Dict[str, Optional[str]]]:
        """Collect the enabled cameras from the config.

        :return: mapping of mount position -> {"camera_type": str, "device_id": Optional[str]}
            (the previous annotation of Dict[str, str] did not match the actual values)
        """
        camera_configs = {}

        if self.config.ego_view_camera is not None:
            camera_configs[CameraMountPosition.EGO_VIEW.value] = {
                "camera_type": self.config.ego_view_camera,
                "device_id": self.config.ego_view_device_id,
            }

        if self.config.head_camera is not None:
            camera_configs[CameraMountPosition.HEAD.value] = {
                "camera_type": self.config.head_camera,
                "device_id": self.config.head_device_id,
            }

        if self.config.left_wrist_camera is not None:
            camera_configs[CameraMountPosition.LEFT_WRIST.value] = {
                "camera_type": self.config.left_wrist_camera,
                "device_id": self.config.left_wrist_device_id,
            }

        if self.config.right_wrist_camera is not None:
            camera_configs[CameraMountPosition.RIGHT_WRIST.value] = {
                "camera_type": self.config.right_wrist_camera,
                "device_id": self.config.right_wrist_device_id,
            }

        return camera_configs

    def _camera_worker_wrapper(
        self,
        mount_position: str,
        camera_type: str,
        device_id: Optional[str],
        image_queue: queue.Queue,
        shutdown_event: threading.Event,
        error_event: threading.Event,
    ):
        """Worker thread that continuously captures from a single camera.

        Pushes frames into image_queue, dropping the oldest frame when full.
        On repeated read failures or any exception, records an error message,
        sets error_event, and exits. The camera is always closed on exit.
        """
        camera = None
        try:
            camera = self._instantiate_camera(mount_position, camera_type, device_id)
            self._observation_spaces[mount_position] = camera.observation_space()

            consecutive_failures = 0
            max_consecutive_failures = 5

            while not shutdown_event.is_set():
                frame = camera.read()
                if frame:
                    consecutive_failures = 0  # Reset on successful read
                    # Non-blocking queue put with frame dropping
                    try:
                        image_queue.put_nowait(frame)
                    except queue.Full:
                        # Remove oldest frame and add new one
                        try:
                            image_queue.get_nowait()
                            image_queue.put_nowait(frame)
                        except queue.Empty:
                            pass
                else:
                    consecutive_failures += 1
                    if consecutive_failures >= max_consecutive_failures:
                        error_msg = (
                            f"Camera {mount_position} ({camera_type}) dropped: "
                            f"failed to read {consecutive_failures} consecutive frames"
                        )
                        print(f"[ERROR] {error_msg}")
                        self.error_messages[mount_position] = error_msg
                        error_event.set()
                        break

        except Exception as e:
            error_msg = f"Camera {mount_position} ({camera_type}) error: {str(e)}"
            print(f"[ERROR] {error_msg}")
            self.error_messages[mount_position] = error_msg
            error_event.set()
        finally:
            # Previously the camera was only closed on the clean-exit path, which
            # leaked the device when an exception escaped the capture loop.
            if camera is not None:
                camera.close()

    def _instantiate_camera(
        self, mount_position: str, camera_type: str, device_id: Optional[str] = None
    ) -> Sensor:
        """
        Instantiate a camera sensor based on the camera type.

        Args:
            mount_position: Mount position label passed through to the sensor
            camera_type: Type of camera ("oak", "oak_mono", "realsense", "zed",
                or a path to an .mp4 file for replay)
            device_id: Optional device ID for the camera (used for OAK cameras)

        Returns:
            Sensor instance for the specified camera type

        Raises:
            ValueError: If camera_type is not supported
        """
        if camera_type in ("oak", "oak_mono"):
            from decoupled_wbc.control.sensor.oak import OAKConfig, OAKSensor

            oak_config = OAKConfig()
            if camera_type == "oak_mono":
                oak_config.enable_mono_cameras = True
            print("Initializing OAK sensor for camera type: ", camera_type)
            return OAKSensor(config=oak_config, mount_position=mount_position, device_id=device_id)
        elif camera_type == "realsense":
            from decoupled_wbc.control.sensor.realsense import RealSenseSensor

            print("Initializing RealSense sensor for camera type: ", camera_type)
            return RealSenseSensor(mount_position=mount_position)
        elif camera_type == "zed":
            from decoupled_wbc.control.sensor.zed import ZEDSensor

            print("Initializing ZED sensor for camera type: ", camera_type)
            return ZEDSensor(mount_position=mount_position)
        elif camera_type.endswith(".mp4"):
            from decoupled_wbc.control.sensor.dummy import ReplayDummySensor

            print("Initializing Replay Dummy Sensor for camera type: ", camera_type)
            return ReplayDummySensor(video_path=camera_type)
        else:
            raise ValueError(f"Unsupported camera type: {camera_type}")
    def _check_for_errors(self):
        """Check if any camera thread has encountered an error and raise exception if so.

        Raises:
            RuntimeError: With the recorded error message of the first failed camera.
        """
        for mount_position, error_event in self.error_events.items():
            if error_event.is_set():
                error_msg = self.error_messages.get(
                    mount_position, f"Camera {mount_position} encountered an unknown error"
                )
                raise RuntimeError(error_msg)

    def read(self):
        """Read the freshest available frame from every camera.

        Returns:
            Dict mapping mount position -> latest frame payload. Cameras whose
            queue is currently empty are simply omitted from the result.

        Raises:
            RuntimeError: If any camera worker thread has reported an error.
        """
        # Check for errors from camera threads
        self._check_for_errors()

        message = {}
        for mount_position, camera_queue in self.camera_queues.items():
            frame = self._get_latest_from_queue(camera_queue)
            if frame is not None:
                message[mount_position] = frame
        return message

    def _get_latest_from_queue(self, camera_queue: queue.Queue) -> Optional[Dict[str, Any]]:
        """Get most recent frame, discard older ones.

        Drains the queue non-blockingly and returns the last item seen,
        or None if the queue was empty.
        """
        latest = None
        try:
            while True:
                latest = camera_queue.get_nowait()
        except queue.Empty:
            pass
        return latest

    def close(self):
        """Close all cameras: stop workers, drain queues, and stop the server.

        Worker threads are joined with a 5 s timeout each; a thread that does
        not finish in time is left running (join does not kill it).
        """
        # Signal all worker threads to shutdown
        for shutdown_event in self.shutdown_events.values():
            shutdown_event.set()

        # Wait for all threads to finish
        for thread in self.camera_threads.values():
            thread.join(timeout=5.0)

        # Clear queues
        for camera_queue in self.camera_queues.values():
            try:
                while True:
                    camera_queue.get_nowait()
            except queue.Empty:
                pass

        # Stop server if running
        if self.config.run_as_server:
            self.stop_server()

    def serialize_message(self, message: Dict[str, Any]) -> Dict[str, Any]:
        """Merge all camera data into a single ImageMessageSchema.

        NOTE(review): dict.update means that if two cameras ever produce the
        same image/timestamp key, the later camera silently wins — confirm
        keys are unique per mount position.
        """
        all_timestamps = {}
        all_images = {}

        for _, camera_data in message.items():
            all_timestamps.update(camera_data.get("timestamps", {}))
            all_images.update(camera_data.get("images", {}))

        # Create a single ImageMessageSchema with all data
        img_schema = ImageMessageSchema(timestamps=all_timestamps, images=all_images)
        return img_schema.serialize()
time.monotonic() + sleep_time = target_time - current_time + if sleep_time > 0: + time.sleep(sleep_time) + else: + # If we're behind, increment idx to stay on schedule + if not message: + idx += 1 + + def observation_space(self): + """Return the observation space.""" + import gymnasium as gym + + return gym.spaces.Dict(self._observation_spaces) + + +class ComposedCameraClientSensor(Sensor, SensorClient): + """Class that serves as client for multiple different cameras.""" + + def __init__(self, server_ip: str = "localhost", port: int = 5555): + self.start_client(server_ip, port) + + # Initialize tracking variables + self._latest_message = {} + self._avg_time_per_frame = deque(maxlen=20) + self._msg_received_time = 0 + self._start_time = 0.0 # Initialize _start_time + self.idx = 0 + + print("Initialized composed camera client sensor") + + def read(self, **kwargs) -> Optional[Dict[str, Any]]: + self._start_time = time.time() + message = self.receive_message() + if not message: + return None + self.idx += 1 + + self._latest_message = ImageMessageSchema.deserialize(message).asdict() + + # if self.idx % 10 == 0: + # for image_key, image_time in self._latest_message["timestamps"].items(): + # image_latency = (time.time() - image_time) * 1000 + # print(f"Image latency for {image_key}: {image_latency:.2f} ms") + + self._msg_received_time = time.time() + self._avg_time_per_frame.append(self._msg_received_time - self._start_time) + + return self._latest_message + + def close(self): + """Close the client connection.""" + self.stop_client() + + def fps(self) -> float: + """Get the current FPS of the client.""" + if len(self._avg_time_per_frame) == 0: + return 0.0 + return float(1 / np.mean(self._avg_time_per_frame)) + + +if __name__ == "__main__": + """Test function for ComposedCamera.""" + import tyro + + config = tyro.cli(ComposedCameraConfig) + + if config.run_as_server: + composed_camera = ComposedCameraSensor(config) + print("Running composed camera server...") + 
class OAKConfig:
    """Configuration for the OAK camera."""

    color_image_dim: Tuple[int, int] = (640, 480)  # RGB camera resolution
    monochrome_image_dim: Tuple[int, int] = (640, 480)  # Monochrome camera resolution
    fps: int = 30
    enable_color: bool = True  # Enable CAM_A (RGB)
    enable_mono_cameras: bool = False  # Enable CAM_B & CAM_C (Monochrome stereo pair)
    mount_position: str = CameraMountPosition.EGO_VIEW.value


class OAKSensor(Sensor, SensorServer):
    """Sensor for the OAK camera family."""

    # NOTE(review): the default `config=OAKConfig()` is evaluated once at def
    # time, so all no-arg constructions share one OAKConfig instance; mutating
    # self.config on one sensor would affect the others — confirm this is
    # intended or pass a fresh config explicitly.
    def __init__(
        self,
        run_as_server: bool = False,
        port: int = 5555,
        config: OAKConfig = OAKConfig(),
        device_id: Optional[str] = None,
        mount_position: str = CameraMountPosition.EGO_VIEW.value,
    ):
        """Initialize the OAK camera.

        Connects to a DepthAI device (by id if given, otherwise the first
        available), builds a pipeline with the enabled camera sockets, creates
        one output queue per enabled stream, and starts the pipeline.

        :param run_as_server: If True, also start the sensor server on `port`
        :param port: Server port (only used when run_as_server is True)
        :param config: Stream/resolution/FPS configuration
        :param device_id: Optional DepthAI device id to select a specific camera
        :param mount_position: Label used as the image key for this camera
        :raises ValueError: If device_id is given but no matching device exists
        :raises AssertionError: If no devices are found or no stream is enabled
        """
        self.config = config
        self.mount_position = mount_position
        self._run_as_server = run_as_server

        device_infos = dai.Device.getAllAvailableDevices()
        assert len(device_infos) > 0, f"No OAK devices found for {mount_position}"
        print(f"Device infos: {device_infos}")
        if device_id is not None:
            # Select the specific device requested by id
            device_found = False
            for device_info in device_infos:
                if device_info.getDeviceId() == device_id:
                    self.device = dai.Device(device_info)
                    device_found = True
                    break
            if not device_found:
                raise ValueError(f"Device with ID {device_id} not found")
        else:
            # No id given: take the default (first available) device
            self.device = dai.Device()

        print(f"Connected to OAK device: {self.device.getDeviceName(), self.device.getDeviceId()}")
        print(f"Device ID: {self.device.getDeviceId()}")

        sockets: list[dai.CameraBoardSocket] = self.device.getConnectedCameras()
        print(f"Available cameras: {[str(s) for s in sockets]}")

        # Create pipeline (without context manager to persist across method calls)
        self.pipeline = dai.Pipeline(self.device)
        self.output_queues = {}

        # Configure RGB camera (CAM_A)
        if config.enable_color and dai.CameraBoardSocket.CAM_A in sockets:
            self.cam_rgb = self.pipeline.create(dai.node.Camera)
            cam_socket = dai.CameraBoardSocket.CAM_A
            self.cam_rgb = self.cam_rgb.build(cam_socket)
            # Create RGB output queue
            self.output_queues["color"] = self.cam_rgb.requestOutput(
                config.color_image_dim,
                fps=config.fps,
            ).createOutputQueue()
            print("Enabled CAM_A (RGB)")

        # Configure Monochrome cameras (CAM_B & CAM_C)
        if config.enable_mono_cameras:
            if dai.CameraBoardSocket.CAM_B in sockets:
                self.cam_mono_left = self.pipeline.create(dai.node.Camera)
                cam_socket = dai.CameraBoardSocket.CAM_B
                self.cam_mono_left = self.cam_mono_left.build(cam_socket)
                # Create mono left output queue
                self.output_queues["mono_left"] = self.cam_mono_left.requestOutput(
                    config.monochrome_image_dim,
                    fps=config.fps,
                ).createOutputQueue()
                print("Enabled CAM_B (Monochrome Left)")

            if dai.CameraBoardSocket.CAM_C in sockets:
                self.cam_mono_right = self.pipeline.create(dai.node.Camera)
                cam_socket = dai.CameraBoardSocket.CAM_C
                self.cam_mono_right = self.cam_mono_right.build(cam_socket)
                # Create mono right output queue
                self.output_queues["mono_right"] = self.cam_mono_right.requestOutput(
                    config.monochrome_image_dim,
                    fps=config.fps,
                ).createOutputQueue()
                print("Enabled CAM_C (Monochrome Right)")

        assert len(self.output_queues) > 0, "No output queues enabled"
        # auto exposure compensation, for CoRL demo
        # cam_q_in = self.cam_rgb.inputControl.createInputQueue()
        # ctrl = dai.CameraControl()
        # ctrl.setAutoExposureEnable()
        # ctrl.setAutoExposureCompensation(-2)
        # cam_q_in.send(ctrl)

        # Start pipeline on device
        self.pipeline.start()

        if run_as_server:
            self.start_server(port)
{e}") + return None + + if "mono_right" in self.output_queues: + try: + mono_right_frame = self.output_queues["mono_right"].get() + mono_right_frame_time = mono_right_frame.getTimestamp() + if mono_right_frame is not None: + key = f"{self.mount_position}_right_mono" + images[key] = mono_right_frame.getCvFrame() + timestamps[key] = ( + mono_right_frame_time - dai.Clock.now() + ).total_seconds() + time.time() + except Exception as e: + print(f"[ERROR] Failed to read mono_right frame from {self.mount_position}: {e}") + return None + + if ( + rgb_frame_time is not None + and (rgb_frame_time - dai.Clock.now()).total_seconds() <= -0.2 + ): + print( + f"[{self.mount_position}] OAK latency too large: " + f"{(dai.Clock.now() - rgb_frame_time).total_seconds() * 1000}ms" + ) + + return { + "timestamps": timestamps, + "images": images, + } + + def serialize(self, data: Dict[str, Any]) -> Dict[str, Any]: + """Serialize data using ImageMessageSchema.""" + serialized_msg = ImageMessageSchema(timestamps=data["timestamps"], images=data["images"]) + return serialized_msg.serialize() + + def observation_space(self) -> gym.Space: + spaces = {} + + if self.config.enable_color: + spaces["color_image"] = gym.spaces.Box( + low=0, + high=255, + shape=(self.config.color_image_dim[1], self.config.color_image_dim[0], 3), + dtype=np.uint8, + ) + + if self.config.enable_mono_cameras: + spaces["mono_left_image"] = gym.spaces.Box( + low=0, + high=255, + shape=(self.config.monochrome_image_dim[1], self.config.monochrome_image_dim[0]), + dtype=np.uint8, + ) + spaces["mono_right_image"] = gym.spaces.Box( + low=0, + high=255, + shape=(self.config.monochrome_image_dim[1], self.config.monochrome_image_dim[0]), + dtype=np.uint8, + ) + + return gym.spaces.Dict(spaces) + + def close(self): + """Close the camera connection.""" + if self._run_as_server: + self.stop_server() + if hasattr(self, "pipeline") and self.pipeline.isRunning(): + self.pipeline.stop() + self.device.close() + + def run_server(self): + 
"""Run the server.""" + if not self._run_as_server: + raise ValueError("This function is only available when run_as_server is True") + + while True: + frame = self.read() + if frame is None: + continue + + msg = self.serialize(frame) + self.send_message({self.mount_position: msg}) + + def __del__(self): + self.close() + + +if __name__ == "__main__": + """Test function for OAK camera.""" + + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--server", action="store_true", help="Run as server") + parser.add_argument("--client", action="store_true", help="Run as client") + parser.add_argument("--host", type=str, default="localhost", help="Server IP address") + parser.add_argument("--port", type=int, default=5555, help="Port number") + parser.add_argument("--device-id", type=str, default=None, help="Specific device ID") + parser.add_argument( + "--enable-mono", action="store_true", help="Enable monochrome cameras (CAM_B & CAM_C)" + ) + parser.add_argument("--mount-position", type=str, default="ego_view", help="Mount position") + parser.add_argument("--show-image", action="store_true", help="Display images") + args = parser.parse_args() + + oak_config = OAKConfig() + if args.enable_mono: + oak_config.enable_mono_cameras = True + + if args.server: + # Run as server + oak = OAKSensor( + run_as_server=True, + port=args.port, + config=oak_config, + device_id=args.device_id, + mount_position=args.mount_position, + ) + print(f"Starting OAK server on port {args.port}") + oak.run_server() + + else: + # Run standalone + oak = OAKSensor(run_as_server=False, config=oak_config, device_id=args.device_id) + print("Running OAK camera in standalone mode") + + while True: + frame = oak.read() + if frame is None: + print("Waiting for frame...") + time.sleep(0.5) + continue + + if "color_image" in frame: + print(f"Color image shape: {frame['color_image'].shape}") + if "mono_left_image" in frame: + print(f"Mono left image shape: 
{frame['mono_left_image'].shape}") + if "mono_right_image" in frame: + print(f"Mono right image shape: {frame['mono_right_image'].shape}") + if "depth_image" in frame: + print(f"Depth image shape: {frame['depth_image'].shape}") + + if args.show_image: + if "color_image" in frame: + cv2.imshow("Color Image", frame["color_image"]) + + if "mono_left_image" in frame: + cv2.imshow("Mono Left", frame["mono_left_image"]) + if "mono_right_image" in frame: + cv2.imshow("Mono Right", frame["mono_right_image"]) + + if "depth_image" in frame: + depth_colormap = cv2.applyColorMap( + cv2.convertScaleAbs(frame["depth_image"], alpha=0.03), cv2.COLORMAP_JET + ) + cv2.imshow("Depth Image", depth_colormap) + + if cv2.waitKey(1) == ord("q"): + break + + time.sleep(0.01) + + cv2.destroyAllWindows() + oak.close() diff --git a/gr00t_wbc/control/sensor/sensor_server.py b/decoupled_wbc/control/sensor/sensor_server.py similarity index 100% rename from gr00t_wbc/control/sensor/sensor_server.py rename to decoupled_wbc/control/sensor/sensor_server.py diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ClientLogging.hpp b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ClientLogging.hpp similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/ClientLogging.hpp rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ClientLogging.hpp diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecific.cpp b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecific.cpp similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecific.cpp rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecific.cpp diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecific.hpp b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecific.hpp similarity index 100% rename from 
gr00t_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecific.hpp rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecific.hpp diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecificTypes.hpp b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecificTypes.hpp similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecificTypes.hpp rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ClientPlatformSpecificTypes.hpp diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/Dockerfile b/decoupled_wbc/control/teleop/device/SDKClient_Linux/Dockerfile similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/Dockerfile rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/Dockerfile diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/Dockerfile.Integrated b/decoupled_wbc/control/teleop/device/SDKClient_Linux/Dockerfile.Integrated similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/Dockerfile.Integrated rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/Dockerfile.Integrated diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/Main.cpp b/decoupled_wbc/control/teleop/device/SDKClient_Linux/Main.cpp similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/Main.cpp rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/Main.cpp diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/Makefile b/decoupled_wbc/control/teleop/device/SDKClient_Linux/Makefile similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/Makefile rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/Makefile diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDK.h b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDK.h similarity index 100% rename from 
gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDK.h rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDK.h diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDKTypeInitializers.h b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDKTypeInitializers.h similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDKTypeInitializers.h rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDKTypeInitializers.h diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDKTypes.h b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDKTypes.h similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDKTypes.h rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/include/ManusSDKTypes.h diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib/libManusSDK.so b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib/libManusSDK.so similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib/libManusSDK.so rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib/libManusSDK.so diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib/libManusSDK_Integrated.so b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib/libManusSDK_Integrated.so similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib/libManusSDK_Integrated.so rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib/libManusSDK_Integrated.so diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusServer.cpython-310-x86_64-linux-gnu.so b/decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusServer.cpython-310-x86_64-linux-gnu.so similarity index 
100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusServer.cpython-310-x86_64-linux-gnu.so rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusServer.cpython-310-x86_64-linux-gnu.so diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/Readme.md b/decoupled_wbc/control/teleop/device/SDKClient_Linux/Readme.md similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/Readme.md rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/Readme.md diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/SDKClient.cpp b/decoupled_wbc/control/teleop/device/SDKClient_Linux/SDKClient.cpp similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/SDKClient.cpp rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/SDKClient.cpp diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/SDKClient.hpp b/decoupled_wbc/control/teleop/device/SDKClient_Linux/SDKClient.hpp similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/SDKClient.hpp rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/SDKClient.hpp diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/SDKClient_Linux.vcxproj b/decoupled_wbc/control/teleop/device/SDKClient_Linux/SDKClient_Linux.vcxproj similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/SDKClient_Linux.vcxproj rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/SDKClient_Linux.vcxproj diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/SDKClient_Linux.vcxproj.filters b/decoupled_wbc/control/teleop/device/SDKClient_Linux/SDKClient_Linux.vcxproj.filters similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/SDKClient_Linux.vcxproj.filters rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/SDKClient_Linux.vcxproj.filters diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/ClientPlatformSpecific.d 
b/decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/ClientPlatformSpecific.d similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/ClientPlatformSpecific.d rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/ClientPlatformSpecific.d diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/ClientPlatformSpecific.o b/decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/ClientPlatformSpecific.o similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/ClientPlatformSpecific.o rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/ClientPlatformSpecific.o diff --git a/decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/Main.d b/decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/Main.d new file mode 100644 index 0000000..4a8fe37 --- /dev/null +++ b/decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/Main.d @@ -0,0 +1,124 @@ +objects/Main.o: Main.cpp ClientLogging.hpp SDKClient.hpp \ + ClientPlatformSpecific.hpp ClientPlatformSpecificTypes.hpp \ + ManusSDK/include/ManusSDK.h ManusSDK/include/ManusSDKTypes.h \ + ManusSDK/include/ManusSDKTypeInitializers.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/pybind11.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/class.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/attr.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/common.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/Python.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/patchlevel.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pyconfig.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pymacconfig.h \ + 
/home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pyport.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/exports.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pymacro.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pymath.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pymem.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/pymem.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/object.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/object.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/objimpl.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/objimpl.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/typeslots.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pyhash.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/pydebug.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/bytearrayobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/bytearrayobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/bytesobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/bytesobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/unicodeobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/unicodeobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/longobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/longintrepr.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/boolobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/floatobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/complexobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/rangeobject.h \ + 
/home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/memoryobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/tupleobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/tupleobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/listobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/listobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/dictobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/dictobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/odictobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/enumobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/setobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/methodobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/methodobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/moduleobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/funcobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/classobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/fileobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/fileobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pycapsule.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/code.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/code.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pyframe.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/traceback.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/traceback.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/sliceobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cellobject.h \ + 
/home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/iterobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/initconfig.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/genobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pystate.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/pystate.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/abstract.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/abstract.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/descrobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/genericaliasobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/warnings.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/weakrefobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/structseq.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/namespaceobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/picklebufobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/pytime.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/codecs.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pyerrors.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/pyerrors.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pythread.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/context.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/modsupport.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/compile.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/compile.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pythonrun.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/pythonrun.h \ + 
/home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pylifecycle.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/pylifecycle.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/ceval.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/ceval.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/sysmodule.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/sysmodule.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/osmodule.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/intrcheck.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/import.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/import.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/bltinmodule.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/eval.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/pyctype.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pystrtod.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pystrcmp.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/fileutils.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/fileutils.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/pyfpe.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/tracemalloc.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/frameobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/cpython/frameobject.h \ + /home/gear/miniconda3/envs/decoupled_wbc/include/python3.10/pythread.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/cast.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/descr.h \ + 
/home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/type_caster_base.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/pytypes.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/buffer_info.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/cpp_conduit.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/internals.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/typeid.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/value_and_holder.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/options.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/exception_translation.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/detail/init.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/gil.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/gil_safe_call_once.h \ + /home/gear/miniconda3/envs/decoupled_wbc/lib/python3.10/site-packages/pybind11/include/pybind11/typing.h diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/Main.o b/decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/Main.o similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/Main.o rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/Main.o diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/SDKClient.d b/decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/SDKClient.d 
similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/SDKClient.d rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/SDKClient.d diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/SDKClient.o b/decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/SDKClient.o similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/objects/SDKClient.o rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/objects/SDKClient.o diff --git a/gr00t_wbc/control/teleop/device/SDKClient_Linux/test.py b/decoupled_wbc/control/teleop/device/SDKClient_Linux/test.py similarity index 100% rename from gr00t_wbc/control/teleop/device/SDKClient_Linux/test.py rename to decoupled_wbc/control/teleop/device/SDKClient_Linux/test.py diff --git a/gr00t_wbc/control/teleop/device/iphone/iphone.py b/decoupled_wbc/control/teleop/device/iphone/iphone.py similarity index 100% rename from gr00t_wbc/control/teleop/device/iphone/iphone.py rename to decoupled_wbc/control/teleop/device/iphone/iphone.py diff --git a/decoupled_wbc/control/teleop/device/manus.py b/decoupled_wbc/control/teleop/device/manus.py new file mode 100644 index 0000000..aa76341 --- /dev/null +++ b/decoupled_wbc/control/teleop/device/manus.py @@ -0,0 +1,138 @@ +from contextlib import contextmanager +import pickle +import time + +import click +import numpy as np +from scipy.spatial.transform import Rotation as R +import zmq + +from decoupled_wbc.control.teleop.device.SDKClient_Linux import ManusServer + +manus_idx = { + "left": ["3822396207", "3998055887", "432908014"], + "right": ["3762867141", "831307785", "3585023564"], +} + + +class Manus: + def __init__(self, port=5556): + # Storage for the latest finger data + self.latest_finger_data = { + "left_fingers": {"angle": np.zeros([40]), "position": np.zeros([25, 4, 4])}, + "right_fingers": {"angle": np.zeros([40]), "position": np.zeros([25, 4, 4])}, + } + self.manus_server = None + 
self.port = port + + def process_finger_pose(self, raw_position, raw_orientation, dir): + raw_position = np.asarray(raw_position).reshape([-1, 3]) + raw_orientation = np.asarray(raw_orientation).reshape([-1, 4]) + + def reorder(data): + return np.concatenate( + [ + data[0:1], # root + data[21:25], # thumb + data[1:6], # index + data[6:11], # middle + data[16:21], # ring + data[11:16], # pinky + ] + ) + + raw_position = reorder(raw_position) + raw_orientation = reorder(raw_orientation) + + transformation_matrix = np.zeros([25, 4, 4]) + rot_matrix = R.from_quat(raw_orientation[0][[1, 2, 3, 0]]).as_matrix() + transformation_matrix[:, :3, :3] = rot_matrix + transformation_matrix[:, :3, 3] = raw_position + transformation_matrix[:, 3, 3] = 1.0 + + T_root = transformation_matrix[0] + T_root_inv = np.linalg.inv(T_root) + transformation_matrix = np.matmul(T_root_inv[None], transformation_matrix) + + T_manus2avp = np.identity(4) + if dir == "right": + T_manus2avp[:3, :3] = R.from_euler("zx", [180, -90], degrees=True).as_matrix() + else: + T_manus2avp[:3, :3] = R.from_euler("x", [90], degrees=True).as_matrix() + transformation_matrix = np.matmul(T_manus2avp[None], transformation_matrix) + + return transformation_matrix + + def process_finger_angle(self, raw_finger_data, dir): + if dir == "right": + non_zero_data = [value for value in raw_finger_data if value != 0.0] + trailing_zeros_count = len(raw_finger_data) - len(non_zero_data) + raw_finger_data = non_zero_data + [0.0] * trailing_zeros_count + + return raw_finger_data + + def request_finger_data(self): + output = ManusServer.get_latest_state() + for dir, val in manus_idx.items(): + for id in val: + angle_name = f"{id}_angle" + if angle_name in output and len(output[angle_name]) > 0: + self.latest_finger_data[f"{dir}_fingers"]["angle"] = self.process_finger_angle( + np.asarray(output[angle_name]), dir + ) + + position_name = f"{id}_position" + orientation_name = f"{id}_orientation" + if ( + position_name in output + and 
orientation_name in output + and len(output[position_name]) > 0 + and len(output[orientation_name]) > 0 + ): + self.latest_finger_data[f"{dir}_fingers"]["position"] = ( + self.process_finger_pose( + output[position_name], output[orientation_name], dir + ) + ) + + return self.latest_finger_data + + @contextmanager + def activate(self): + try: + ManusServer.init() + self.context = zmq.Context() + self.socket = self.context.socket(zmq.REP) + self.socket.bind(f"tcp://*:{self.port}") + yield self + finally: + ManusServer.shutdown() + + def run(self): + while True: + # Wait for a request from the client + _ = self.socket.recv() + # Process finger data + data = self.request_finger_data() + data["timestamp"] = time.time() + + # Serialize the data to send back + serialized_data = pickle.dumps(data) + + # Send the serialized data back to the client + self.socket.send(serialized_data) + + +@click.command() +@click.option("--port", type=int, default=5556) +def main(port): + print("... starting manus server ... at port", port, flush=True) + manus = Manus(port=port) + print("... 
manus server activating ...") + with manus.activate(): + print("==> Manus server is running at port", port, flush=True) + manus.run() + + +if __name__ == "__main__": + main() diff --git a/gr00t_wbc/control/teleop/device/pico/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb b/decoupled_wbc/control/teleop/device/pico/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb similarity index 100% rename from gr00t_wbc/control/teleop/device/pico/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb rename to decoupled_wbc/control/teleop/device/pico/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb diff --git a/gr00t_wbc/control/teleop/device/pico/roboticsservice_1.0.0.0_arm64.deb b/decoupled_wbc/control/teleop/device/pico/roboticsservice_1.0.0.0_arm64.deb similarity index 100% rename from gr00t_wbc/control/teleop/device/pico/roboticsservice_1.0.0.0_arm64.deb rename to decoupled_wbc/control/teleop/device/pico/roboticsservice_1.0.0.0_arm64.deb diff --git a/gr00t_wbc/control/teleop/device/pico/xr_client.py b/decoupled_wbc/control/teleop/device/pico/xr_client.py similarity index 100% rename from gr00t_wbc/control/teleop/device/pico/xr_client.py rename to decoupled_wbc/control/teleop/device/pico/xr_client.py diff --git a/decoupled_wbc/control/teleop/gui/cli.py b/decoupled_wbc/control/teleop/gui/cli.py new file mode 100644 index 0000000..b37996c --- /dev/null +++ b/decoupled_wbc/control/teleop/gui/cli.py @@ -0,0 +1,20 @@ +import subprocess + +import click + + +@click.command() +def teleop(): + """CLI for interacting with the osmo.""" + subprocess.run(["python", "decoupled_wbc/control/teleop/gui/main.py"]) + + +@click.group() +def cli(): + """CLI for interacting with the osmo.""" + + +cli.add_command(teleop) + +if __name__ == "__main__": + cli() diff --git a/gr00t_wbc/control/teleop/gui/core/events3d.py b/decoupled_wbc/control/teleop/gui/core/events3d.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/core/events3d.py rename to 
decoupled_wbc/control/teleop/gui/core/events3d.py diff --git a/gr00t_wbc/control/teleop/gui/core/gui3d.py b/decoupled_wbc/control/teleop/gui/core/gui3d.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/core/gui3d.py rename to decoupled_wbc/control/teleop/gui/core/gui3d.py diff --git a/gr00t_wbc/control/teleop/gui/core/guicommon.py b/decoupled_wbc/control/teleop/gui/core/guicommon.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/core/guicommon.py rename to decoupled_wbc/control/teleop/gui/core/guicommon.py diff --git a/gr00t_wbc/control/teleop/gui/core/module3d.py b/decoupled_wbc/control/teleop/gui/core/module3d.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/core/module3d.py rename to decoupled_wbc/control/teleop/gui/core/module3d.py diff --git a/gr00t_wbc/control/teleop/gui/core/selection.py b/decoupled_wbc/control/teleop/gui/core/selection.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/core/selection.py rename to decoupled_wbc/control/teleop/gui/core/selection.py diff --git a/gr00t_wbc/control/teleop/gui/library/getpath.py b/decoupled_wbc/control/teleop/gui/library/getpath.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/library/getpath.py rename to decoupled_wbc/control/teleop/gui/library/getpath.py diff --git a/gr00t_wbc/control/teleop/gui/library/image.py b/decoupled_wbc/control/teleop/gui/library/image.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/library/image.py rename to decoupled_wbc/control/teleop/gui/library/image.py diff --git a/gr00t_wbc/control/teleop/gui/library/language.py b/decoupled_wbc/control/teleop/gui/library/language.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/library/language.py rename to decoupled_wbc/control/teleop/gui/library/language.py diff --git a/gr00t_wbc/control/teleop/gui/library/log.py b/decoupled_wbc/control/teleop/gui/library/log.py similarity index 100% rename from 
gr00t_wbc/control/teleop/gui/library/log.py rename to decoupled_wbc/control/teleop/gui/library/log.py diff --git a/gr00t_wbc/control/teleop/gui/library/matrix.py b/decoupled_wbc/control/teleop/gui/library/matrix.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/library/matrix.py rename to decoupled_wbc/control/teleop/gui/library/matrix.py diff --git a/gr00t_wbc/control/teleop/gui/library/mh.py b/decoupled_wbc/control/teleop/gui/library/mh.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/library/mh.py rename to decoupled_wbc/control/teleop/gui/library/mh.py diff --git a/gr00t_wbc/control/teleop/gui/library/profiler.py b/decoupled_wbc/control/teleop/gui/library/profiler.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/library/profiler.py rename to decoupled_wbc/control/teleop/gui/library/profiler.py diff --git a/gr00t_wbc/control/teleop/gui/library/qtgui.py b/decoupled_wbc/control/teleop/gui/library/qtgui.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/library/qtgui.py rename to decoupled_wbc/control/teleop/gui/library/qtgui.py diff --git a/gr00t_wbc/control/teleop/gui/library/universal.py b/decoupled_wbc/control/teleop/gui/library/universal.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/library/universal.py rename to decoupled_wbc/control/teleop/gui/library/universal.py diff --git a/gr00t_wbc/control/teleop/gui/library/xdg_parser.py b/decoupled_wbc/control/teleop/gui/library/xdg_parser.py similarity index 100% rename from gr00t_wbc/control/teleop/gui/library/xdg_parser.py rename to decoupled_wbc/control/teleop/gui/library/xdg_parser.py diff --git a/decoupled_wbc/control/teleop/gui/main.py b/decoupled_wbc/control/teleop/gui/main.py new file mode 100644 index 0000000..ad6cbd3 --- /dev/null +++ b/decoupled_wbc/control/teleop/gui/main.py @@ -0,0 +1,547 @@ +import json +import os +import signal +import subprocess +import sys +from typing import Dict + +import library.log as log +import 
library.qtgui as qtgui +from library.universal import G +from PyQt6.QtCore import QProcess, QSettings, Qt +from PyQt6.QtWidgets import ( + QApplication, + QCheckBox, + QComboBox, + QFormLayout, + QGridLayout, + QHBoxLayout, + QLabel, + QLineEdit, + QMainWindow, + QPushButton, + QVBoxLayout, + QWidget, +) + +import decoupled_wbc + +signal.signal(signal.SIGINT, signal.SIG_DFL) + + +GR00T_TELEOP_DATA_ROOT = os.path.join(os.path.dirname(decoupled_wbc.__file__), "./external/teleop/data") + + +class MainWindow(QMainWindow): + def __init__( + self, + fix_pick_up=True, + fix_pointing=False, + object_A_options=[ + "small cup", + "dragon fruit", + "orange", + "apple", + "mango", + "star fruit", + "rubiks cube", + "lemon", + "lime", + "red can", + "blue can", + "green can", + "cucumber", + "bottled water", + "big cup", + "mayo", + "mustard", + "bok choy", + "grapes", + "soup can", + "mouse", + "water apple", + "corn", + "mug", + "orange cup", + "bitter gourd", + "banana", + "mangosteen", + "marker", + "coffee pod", + "plastic cup", + "grapes", + "small mug", + "condiment bottles", + "corn", + "tools", + "pear", + "eggplant", + "canned beans", + "potato", + ], + object_from_options=[ + "cutting board", + "pan", + "plate", + "bowl", + "tray", + "desk", + "placemat", + "table", + "mesh cup", + "shelf", + ], + object_to_options=[ + "cutting board", + "pan", + "plate", + "bowl", + "tray", + "microwave", + "basket", + "drawer", + "placemat", + "clear bin", + "mesh cup", + "yellow bin", + "shelf", + ], + ): + super().__init__() + # Fix the pick up and place object type + self.fix_pick_up = fix_pick_up + self.fix_pointing = fix_pointing + + # window settings + self.setWindowTitle("Gr00t Capture Test") + self.setFixedWidth(800) + self.setMinimumHeight(1000) + + self.main_layout = QVBoxLayout() + self.main_layout.setAlignment(Qt.AlignmentFlag.AlignTop) + + # stop button + self.stop_button = QPushButton("Emergency Stop", self) + self.stop_button.setStyleSheet( + "background-color: red; 
color: white; font-size: 20px; font-weight: bold; height: 60px;"
+        )
+        # self.stop_button.setEnabled(False)
+        self.stop_button.clicked.connect(self.stop_process)
+        self.main_layout.addWidget(self.stop_button)
+
+        # property label
+        self.property_label = QLabel("Settings")
+        self.property_label.setStyleSheet(
+            "font-size: 20px; font-weight: bold; color: #333; max-height: 30px;"
+        )
+        self.main_layout.addWidget(self.property_label)
+
+        # operator form
+        settings_form_layout = QFormLayout()
+        self.operator_input = QLineEdit("Zu")
+        # self.operator_input.textChanged.connect(self.property_label.setText)
+        settings_form_layout.addRow(QLabel("Operator Name:"), self.operator_input)
+
+        # collector form
+        self.collector_input = QLineEdit("Zu")
+        settings_form_layout.addRow(QLabel("Collector Name:"), self.collector_input)
+
+        # description form
+        self.description_input = QLineEdit("3D pick up")
+
+        # object A menu
+        self.object_A_input = QComboBox()
+        self.object_A_input.addItems(object_A_options)
+
+        # object "from" menu
+        self.object_from_input = QComboBox()
+        self.object_from_input.addItems(object_from_options)
+
+        # object "to" menu
+        self.object_to_input = QComboBox()
+        self.object_to_input.addItems(object_to_options)
+
+        box_layout = QHBoxLayout()
+        box_layout.setAlignment(Qt.AlignmentFlag.AlignLeft)
+        if self.fix_pick_up:
+            # box_layout.addWidget(QLabel("Pour Object:"))
+            box_layout.addWidget(QLabel("Pick up Object:"))
+            box_layout.addWidget(self.object_A_input)
+            box_layout.addWidget(QLabel("from:"))
+            box_layout.addWidget(self.object_from_input)
+            box_layout.addWidget(QLabel("To:"))
+            box_layout.addWidget(self.object_to_input)
+        elif self.fix_pointing:
+            box_layout.addWidget(QLabel("Pointing Object:"))
+            box_layout.addWidget(self.object_A_input)
+        else:
+            box_layout.addWidget(self.description_input)
+
+        # add a button to fix the pick up and place object
+        self.save_description_button = QPushButton("Save Description")
+ 
self.save_description_button.setMinimumWidth(100) + self.save_description_button.clicked.connect(self.save_settings) + box_layout.addWidget(self.save_description_button) + + settings_form_layout.addRow(QLabel("Task description:"), box_layout) + + # robot form + self.robot_input = QLineEdit("gr00t002") + settings_form_layout.addRow(QLabel("Robot Name:"), self.robot_input) + + # vive keyword form + self.vive_keyword_input = QComboBox() + self.vive_keyword_input.addItems(["elbow", "knee", "wrist", "shoulder", "foot"]) + settings_form_layout.addRow(QLabel("Vive keyword:"), self.vive_keyword_input) + + settings_form_layout.addRow(QLabel("-" * 300)) + self.vive_ip_input = QLineEdit("192.168.0.182") + settings_form_layout.addRow(QLabel("Vive ip:"), self.vive_ip_input) + + self.vive_port_input = QLineEdit("5555") + settings_form_layout.addRow(QLabel("Vive port:"), self.vive_port_input) + self.manus_port_input = QLineEdit("5556") + settings_form_layout.addRow(QLabel("Manus port:"), self.manus_port_input) + + # manus form + # self.manus_input = QLineEdit("foot") + # settings_form_layout.addRow(QLabel("Manus name:"), self.manus_input) + + self.main_layout.addLayout(settings_form_layout) + + # testing buttons + self.testing_button_layout = QGridLayout() + + # vive button + self.test_vive_button = QPushButton("Test Vive") + self.test_vive_button.setCheckable(True) + self.test_vive_button.toggled.connect(self.test_vive) + self.test_vive_button.setMaximumHeight(30) + self.testing_button_layout.addWidget(self.test_vive_button, 0, 1) + self.vive_checkbox = QCheckBox("Vive Ready") + self.testing_button_layout.addWidget(self.vive_checkbox, 0, 2) + + # start manus button + self.server_manus_button = QPushButton("Start Manus") + self.server_manus_button.setCheckable(True) + self.server_manus_button.toggled.connect(self.test_manus_server) + self.server_manus_button.setMaximumHeight(30) + self.testing_button_layout.addWidget(self.server_manus_button, 1, 0) + + # manus button + 
self.client_manus_button = QPushButton("Test Manus") + self.client_manus_button.setCheckable(True) + self.client_manus_button.toggled.connect(self.test_manus_client) + self.client_manus_button.setMaximumHeight(30) + self.testing_button_layout.addWidget(self.client_manus_button, 1, 1) + + self.manus_checkbox = QCheckBox("Manus Ready") + self.manus_checkbox.setEnabled(False) + self.testing_button_layout.addWidget(self.manus_checkbox, 1, 2) + + # self.testing_button_layout.addWidget() + self.main_layout.addLayout(self.testing_button_layout) + + container = QWidget() + container.setLayout(self.main_layout) + + # Set the central widget of the Window. + self.setCentralWidget(container) + + # Process + # QProcess for running scripts + self.process = QProcess(self) + # self.process.readyRead.connect(self.handle_stdout) + self.process.readyReadStandardOutput.connect(self.handle_stdout) + self.process.readyReadStandardError.connect(self.handle_stderr) + self.process.errorOccurred.connect(self.process_error) + self.process.finished.connect(self.process_finished) + self.current_process_name = "" + + # QProcess for running scripts + self.manus_process = QProcess(self) + # self.process.readyRead.connect(self.handle_stdout) + self.manus_process.readyReadStandardOutput.connect(self.handle_stdout) + self.manus_process.readyReadStandardError.connect(self.handle_stderr) + self.manus_process.finished.connect(self.process_finished) + + # load history + self.load_settings() + + # record all buttons + self.test_buttons: Dict[str, QPushButton] = { + "test_vive": self.test_vive_button, + "test_manus": self.client_manus_button, + } + + def load_settings(self): + settings = QSettings("Gear", "Teleop") + self.vive_ip_input.setText(settings.value("vive_ip", "192.168.0.182")) + self.vive_keyword_input.setCurrentIndex(int(settings.value("vive_keyword", 0))) + self.robot_input.setText(settings.value("robot_input", "")) + self.operator_input.setText(settings.value("operator", "")) + 
self.collector_input.setText(settings.value("data_collector", "")) + self.description_input.setText(settings.value("description", "")) + self.object_A_input.setCurrentIndex(int(settings.value("object", 0))) + self.object_from_input.setCurrentIndex(int(settings.value("object_from", 0))) + self.object_to_input.setCurrentIndex(int(settings.value("object_to", 0))) + + def save_settings(self): + # Create QSettings object (You can provide your app name and organization for persistent storage) + settings = QSettings("Gear", "Teleop") + + # Save the text from the QLineEdit + settings.setValue("vive_ip", self.vive_ip_input.text()) + settings.setValue("vive_keyword", self.vive_keyword_input.currentIndex()) + settings.setValue("robot_input", self.robot_input.text()) + settings.setValue("operator", self.operator_input.text()) + settings.setValue("data_collector", self.collector_input.text()) + + if self.fix_pick_up: + description = ( + f"pick {self.object_A_input.currentText()} " + f"{self.object_from_input.currentText()}->" + f"{self.object_to_input.currentText()}" + ) + elif self.fix_pointing: + description = f"point at {self.object_A_input.currentText()}" + else: + description = self.description_input.text() + + settings.setValue("description", description) + settings.setValue("object", self.object_A_input.currentIndex()) + settings.setValue("object_from", self.object_from_input.currentIndex()) + settings.setValue("object_to", self.object_to_input.currentIndex()) + + # Save the settings + descrition_file = os.path.join(GR00T_TELEOP_DATA_ROOT, "description.json") + with open(descrition_file, "w") as f: + json.dump( + { + "operator": self.operator_input.text(), + "data_collector": self.collector_input.text(), + "description": description, + }, + f, + ) + print("Settings saved to {}".format(descrition_file)) + + ################################# Process ################################# + def handle_stdout(self): + data = self.process.readAllStandardOutput().data() + stdout = 
data.decode("utf-8") + G.app.log_window.addLogMessage(stdout, log.DEBUG) + + data = self.manus_process.readAllStandardOutput().data() + stdout = data.decode("utf-8") + G.app.log_window.addLogMessage(stdout, log.DEBUG) + + def handle_stderr(self): + data = self.process.readAllStandardError().data() + stderr = data.decode("utf-8") + G.app.log_window.addLogMessage(stderr, log.ERROR) + + data = self.manus_process.readAllStandardError().data() + stderr = data.decode("utf-8") + G.app.log_window.addLogMessage(stderr, log.ERROR) + + def process_finished(self, exit_code, exit_status): + if exit_status == QProcess.ExitStatus.NormalExit: + G.app.log_window.addLogMessage( + f"Process finished successfully with exit code {exit_code}.", log.INFO + ) + else: + G.app.log_window.addLogMessage( + f"Process finished with error code {exit_code}.", log.ERROR + ) + + G.app.log_window.addLogMessage("---------------------------------------------", log.INFO) + + # toggle the button for the current task + print("current_process_name", self.current_process_name, self.test_buttons.keys()) + if self.current_process_name in self.test_buttons.keys(): + if self.test_buttons[self.current_process_name].isChecked(): + self.test_buttons[self.current_process_name].click() + + def process_error(self, error): + G.app.log_window.addLogMessage(f"Process Error (or stopped manually): {error}", log.ERROR) + G.app.log_window.addLogMessage("---------------------------------------------", log.INFO) + + ################################# Timer ################################# + + def update_log(self): + print("update log") + # G.app.log_window.addLogMessage("Update log", log.DEBUG) + returnBool = self.process.waitForFinished(1000) + print("returnBool", returnBool) + if not returnBool: + data = self.process.readAllStandardOutput().data() + stdout = data.decode("utf8") + if stdout: + G.app.log_window.addLogMessage(stdout, log.DEBUG) + G.app.log_window.updateView() + + def test_vive(self, checked): + 
print("checked", checked) + if not checked: + self.test_vive_button.setText("Test Vive") + self.stop_process() + self.current_process_name = "" + else: + self.current_process_name = "test_vive" + self.test_vive_button.setText("Stop Test") + self.stop_button.setEnabled(True) + G.app.log_window.addLogMessage("Testing Vive", log.DEBUG) + script = "python" + args = [ + "decoupled_wbc/control/teleop/main/test_vive.py", + "--keyword", + self.vive_keyword_input.currentText(), + "--ip", + self.vive_ip_input.text(), + "--port", + self.vive_port_input.text(), + ] + self.process.start(script, args) + + def test_manus_server(self, checked): + if not checked: # 2 + self.server_manus_button.setText("Start Manus") + self.stop_manus_server() + self.manus_checkbox.setEnabled(False) + else: # 1 + self.server_manus_button.setText("Stop Manus") + self.manus_checkbox.setEnabled(True) + self.start_manus_server() + + def start_manus_server(self): + G.app.log_window.addLogMessage("Starting manus server", log.WARNING) + script = "python" + args = ["decoupled_wbc/control/teleop/device/manus.py", "--port", self.manus_port_input.text()] + self.manus_process.start(script, args) + + def stop_manus_server(self): + G.app.log_window.addLogMessage("Stopping manus server", log.WARNING) + self.manus_process.terminate() + self.manus_process.waitForFinished(2000) + if self.manus_process.state() != QProcess.ProcessState.NotRunning: + self.manus_process.kill() + self.stop_button.setEnabled(False) + G.app.log_window.addLogMessage("Manus stopped", log.DEBUG) + + def test_manus_client(self, checked): + if not checked: + self.client_manus_button.setText("Test Manus") + self.stop_process() + self.current_process_name = "" + else: + self.current_process_name = "test_manus" + self.client_manus_button.setText("Stop Test") + self.stop_button.setEnabled(True) + G.app.log_window.addLogMessage("Testing", log.DEBUG) + print("Testing manus") + script = "python" + args = [ + 
"decoupled_wbc/control/teleop/main/test_manus.py", + "--port", + self.manus_port_input.text(), + ] + self.process.start(script, args) + + ############################### Stop Process ################################# + + def stop_process(self): + G.app.log_window.addLogMessage("Stopping process", log.WARNING) + # disable all buttons + for button in self.test_buttons.values(): + button.setCheckable(False) + button.setChecked(False) + button.setEnabled(False) + + # kill the process + if self.process.state() == QProcess.ProcessState.Running: + # os.kill(self.process.processId(), signal.SIGINT) + self.process.waitForFinished(2000) # Wait for up to 2 seconds for termination + if self.process.state() != QProcess.ProcessState.NotRunning: + self.process.kill() + # self.stop_button.setEnabled(False) + G.app.log_window.addLogMessage("Script stopped", log.DEBUG) + + if self.current_process_name in ["test_oak", "record_demonstration", "test_robot"]: + command = ( + f"ps aux | grep {self.current_process_name}" + + " | grep -v grep | awk '{print $2}' | xargs kill" + ) + + # Execute the command with subprocess.Popen, capturing both stdout and stderr + sub_process = subprocess.Popen( + command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + # Get the output and errors (if any) + stdout, stderr = sub_process.communicate() + # Ensure the process is successfully terminated + sub_process.wait() + + # enable all buttons + for button in self.test_buttons.values(): + button.setCheckable(True) + button.setEnabled(True) + + # check the button if the button was checked + if self.current_process_name in self.test_buttons.keys(): + if self.test_buttons[self.current_process_name].isChecked(): + self.test_buttons[self.current_process_name].setChecked(False) + + +class LogWindow(qtgui.ListView): + def __init__(self): + super(LogWindow, self).__init__() + self.level = log.DEBUG + self.allowMultipleSelection(True) + + def setLevel(self, level): + self.level = level + 
self.updateView() + + def keyPressEvent(self, e): + e.ignore() + + def updateView(self): + for i in range(self.count()): + ilevel = self.getItemData(i) + self.showItem(i, ilevel >= self.level) + self.setItemColor(i, log.getLevelColor(ilevel)) + + def addLogMessage(self, text, level=log.INFO): + index = self.count() + color = log.getLevelColor(level) + self.addLogItem(text, color, level) + self.showItem(index, level >= self.level) + + +class GCApplication(QApplication): + def __init__(self, argv): + super(GCApplication, self).__init__(argv) + + # application + if G.app is not None: + raise RuntimeError("MHApplication is a singleton") + G.app = self + + self.mainwin = MainWindow() + self.log_window = LogWindow() + self.mainwin.main_layout.addWidget(self.log_window) + self.mainwin.show() + # self.log_window.show() + + +# Function to terminate the subprocess gracefully +def terminate_subprocess(proc): + if proc.poll() is None: # Check if the process is still running + proc.terminate() # Terminate the process + print("Subprocess terminated.") + + +if __name__ == "__main__": + app = GCApplication(sys.argv) + sys.exit(app.exec()) diff --git a/decoupled_wbc/control/teleop/main/test_iphone.py b/decoupled_wbc/control/teleop/main/test_iphone.py new file mode 100644 index 0000000..f46bca6 --- /dev/null +++ b/decoupled_wbc/control/teleop/main/test_iphone.py @@ -0,0 +1,306 @@ +import argparse +import time + +import matplotlib.pyplot as plt +import numpy as np + +from decoupled_wbc.control.teleop.device.iphone.iphone import IPhoneDevice + + +class IphonePublisher: + def __init__(self, port: int = 5555, silent: bool = True): + self._device = IPhoneDevice(port=port, silent=silent) + + # record the initial transform + self._init_transform = None + self._init_transform_inverse = None + + # record the latest transform and timestamp + self._latest_transform = None + self._latest_timestamp = None + + # publishing variables + self._velocity = [0, 0, 0] + self._position = [0, 0, 0] + + # 
visualization + self._fig = None + self._ax = None + self._setup_visualization() + + def _draw_axes(self, ax): + """Draw coordinate axes, labels, and legend on the given axes object.""" + origin = [0, 0, 0] + axis_length = 0.3 + # X axis (right) + ax.quiver( + origin[0], + origin[1], + origin[2], + axis_length, + 0, + 0, + color="r", + arrow_length_ratio=0.1, + linewidth=2, + ) + # Y axis (out/forward) + ax.quiver( + origin[0], + origin[1], + origin[2], + 0, + axis_length, + 0, + color="b", + arrow_length_ratio=0.1, + linewidth=2, + ) + # Z axis (up) + ax.quiver( + origin[0], + origin[1], + origin[2], + 0, + 0, + axis_length, + color="g", + arrow_length_ratio=0.1, + linewidth=2, + ) + ax.text(axis_length, 0, 0, "X", color="red") + ax.text(0, axis_length, 0, "Y", color="blue") + ax.text(0, 0, axis_length, "Z", color="green") + from matplotlib.lines import Line2D + + legend_elements = [ + Line2D([0], [0], color="r", lw=2, label="X axis (right)"), + Line2D([0], [0], color="g", lw=2, label="Y axis (out)"), + Line2D([0], [0], color="b", lw=2, label="Z axis (up)"), + ] + ax.legend(handles=legend_elements, loc="upper right") + + def _draw_cube(self, ax): + """Draw a 1x1x1 meter cube centered at the origin on the given axes object.""" + r = [-0.5, 0.5] + corners = [ + [r[0], r[0], r[0]], + [r[0], r[0], r[1]], + [r[0], r[1], r[0]], + [r[0], r[1], r[1]], + [r[1], r[0], r[0]], + [r[1], r[0], r[1]], + [r[1], r[1], r[0]], + [r[1], r[1], r[1]], + ] + edges = [ + (0, 1), + (0, 2), + (0, 4), + (1, 3), + (1, 5), + (2, 3), + (2, 6), + (3, 7), + (4, 5), + (4, 6), + (5, 7), + (6, 7), + ] + for i, j in edges: + x = [corners[i][0], corners[j][0]] + y = [corners[i][1], corners[j][1]] + z = [corners[i][2], corners[j][2]] + ax.plot3D(x, y, z, "gray") + + def _setup_visualization(self): + """Setup matplotlib figure for 6D pose visualization""" + plt.ion() # Enable interactive mode + self._fig = plt.figure(figsize=(8, 8)) + self._ax = self._fig.add_subplot(111, projection="3d") + 
self._ax.set_xlim([-0.5, 0.5]) + self._ax.set_ylim([-0.5, 0.5]) + self._ax.set_zlim([-0.5, 0.5]) + self._ax.set_xlabel("X (right)") + self._ax.set_ylabel("Y (out)") + self._ax.set_zlabel("Z (up)") + self._ax.set_title("iPhone 6D Pose") + self._ax.view_init(elev=30, azim=-45) + self._draw_cube(self._ax) + self._draw_axes(self._ax) + plt.tight_layout() + self._fig.canvas.draw() + plt.pause(0.001) + + def start(self): + self._device.start() + + def stop(self): + self._device.stop() + if self._fig is not None: + plt.close(self._fig) + + def reset_transform(self, *args, timeout=5.0, poll_interval=0.1): + """ + Wait until device returns data, then set the initial transform. + + Args: + timeout (float): Maximum time to wait in seconds. + poll_interval (float): Time between checks in seconds. + """ + start_time = time.time() + while True: + data = self._device.get_cmd() + if data: + self._init_transform = np.array(data.get("transformMatrix")) + self._init_transform_inverse = np.linalg.inv(self._init_transform) + print( + "initial position", [round(v, 4) for v in self._init_transform[:3, 3].tolist()] + ) + self._latest_transform = self._init_transform.copy() + self._latest_timestamp = data.get("timestamp") + print("Initial transform set.") + break + elif time.time() - start_time > timeout: + print("Timeout: Failed to get initial transform data after waiting.") + break + else: + time.sleep(poll_interval) + + return {"success": True, "message": "Triggered!"} + + def update_transfrom(self) -> dict: + data = self._device.get_cmd() + + if data: + transform_matrix = np.array(data.get("transformMatrix")) + position = transform_matrix[:3, 3] + rotation = transform_matrix[:3, :3] + + # Create a single multiline string with position and rotation data + output = f"\r\033[KPosition: {[round(v, 4) for v in position.tolist()]}\n" + output += "\033[KRotation matrix:\n" + for i in range(3): + row = [round(v, 4) for v in rotation[i].tolist()] + output += f"\033[K {row}\n" + + # Move cursor 
up 5 lines (position + "rotation matrix:" + 3 rows) + # and print the entire output at once + print(f"\033[5A{output}", end="", flush=True) + + # draw 6d pose + self._visualize_pose(position, rotation) + + if data: + current_transform = np.array(data.get("transformMatrix")) @ self._init_transform_inverse + # print("current_transform", [round(v, 4) for v in current_transform.flatten().tolist()]) + current_timestamp = data.get("timestamp") + + # Check if the current timestamp is the same as the latest timestamp (No update or disconnect) + if current_timestamp == self._latest_timestamp: + return { + "transform_matrix": self._latest_transform, + "velocity": [0, 0, 0], + "position": self._latest_transform[:3, 3].tolist(), + "timestamp": self._latest_timestamp, + } + + if self._latest_transform is not None: + current_position = current_transform[:3, 3] + current_velocity = (current_position - self._latest_transform[:3, 3]) / ( + current_timestamp - self._latest_timestamp + ) + self._velocity = current_velocity.tolist() + self._position = current_position.tolist() + + self._latest_transform = current_transform.copy() + self._latest_timestamp = current_timestamp + + return { + "transform_matrix": self._latest_transform, + "velocity": self._velocity, + "position": self._position, + "timestamp": self._latest_timestamp, + } + + def _visualize_pose(self, position, rotation): + """Visualize the 6D pose using matplotlib""" + if self._fig is None or not plt.fignum_exists(self._fig.number): + self._setup_visualization() + self._ax.clear() + self._ax.set_xlim([-0.5, 0.5]) + self._ax.set_ylim([-0.5, 0.5]) + self._ax.set_zlim([-0.5, 0.5]) + self._ax.set_xlabel("X (right)") + self._ax.set_ylabel("Y (out)") + self._ax.set_zlabel("Z (up)") + self._ax.set_title("iPhone 6D Pose") + self._ax.view_init(elev=30, azim=-45) + self._draw_cube(self._ax) + self._draw_axes(self._ax) + # Draw position as a marker + pos = np.clip(position, -0.5, 0.5) + self._ax.scatter(pos[0], pos[1], pos[2], 
color="red", s=100, marker="o") + # Draw orientation axes + length = 0.2 + colors = ["r", "b", "g"] # X=red, Y=blue, Z=green + for i in range(3): + direction = rotation[:, i] * length + self._ax.quiver( + pos[0], + pos[1], + pos[2], + direction[0], + direction[1], + direction[2], + color=colors[i], + arrow_length_ratio=0.2, + linewidth=2, + ) + self._fig.canvas.draw() + plt.pause(0.001) + + +def main(): + # Parse command line arguments + parser = argparse.ArgumentParser() + parser.add_argument("--fps", type=int, default=20, help="Frames per second (default: 20)") + parser.add_argument( + "--timeout", + type=int, + default=30, + help="Timeout for waiting for the device to connect (default: 30 seconds)", + ) + parser.add_argument("--debug", action="store_true", help="Enable debug mode") + args = parser.parse_args() + fps = args.fps + timeout = args.timeout + + # Initialize the iPhone publisher + iphone_publisher = IphonePublisher(port=5555, silent=False) + iphone_publisher.start() + print("Waiting for device to connect...press the reset button on the device to start") + iphone_publisher.reset_transform( + timeout=timeout + ) # wait x seconds for the device to return data + + def update_and_visualize(): + # Update the transform and get the latest data + data = iphone_publisher.update_transfrom() + + # print the data for debugging + if args.debug and np.random.rand() < 1 / fps: + print("data", data) + + try: + while True: + update_and_visualize() + time.sleep(1.0 / fps) + except KeyboardInterrupt: + print("Stopping device...") + finally: + iphone_publisher.stop() + + +if __name__ == "__main__": + main() diff --git a/gr00t_wbc/control/teleop/main/test_manus.py b/decoupled_wbc/control/teleop/main/test_manus.py similarity index 100% rename from gr00t_wbc/control/teleop/main/test_manus.py rename to decoupled_wbc/control/teleop/main/test_manus.py diff --git a/decoupled_wbc/control/teleop/main/test_vive.py b/decoupled_wbc/control/teleop/main/test_vive.py new file mode 100644 
index 0000000..7c957c7 --- /dev/null +++ b/decoupled_wbc/control/teleop/main/test_vive.py @@ -0,0 +1,82 @@ +import click +import matplotlib.animation as animation +import matplotlib.pyplot as plt +import numpy as np + +from decoupled_wbc.control.teleop.streamers.vive_streamer import ViveStreamer + + +@click.command() +@click.option("--ip", default="192.168.0.182", help="IP address of the Vive Tracker") +@click.option("--port", default=5555, help="Port number of the Vive Tracker") +@click.option("--keyword", default="elbow", help="Keyword to filter the tracker data") +def main(ip, port, keyword): + print("==>start test vive") + streamer = ViveStreamer(ip=ip, port=port, fps=20, keyword=keyword) + streamer.start_streaming() + + # Create figure and 3D axis + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + pos_data = {} + pose_data_init = {} + ax_line = {} + for dir in ["left", "right"]: + pose_data_init[dir] = None + pos_data[dir] = list() + # Initialize a line object that will be updated in the animation + color = "r" if dir == "left" else "b" + ax_line[dir] = ax.plot([], [], [], f"{color}-", marker="o")[0] + + # Set plot limits (adjust as needed) + ax.set_xlim(-1, 1) + ax.set_ylim(-1, 1) + ax.set_zlim(-1, 1) + + # Set axis labels + ax.set_xlabel("X") + ax.set_ylabel("Y") + ax.set_zlabel("Z") + + # Define the initialization function for the animation + def init(): + for dir in ["left", "right"]: + ax_line[dir].set_data([], []) + ax_line[dir].set_3d_properties([]) + + return ax_line["left"], ax_line["right"] + + # Define the update function that will update the trajectory in real-time + def update(num): + streamer_output = streamer.get() + + # Handle case where no data is available yet + if not streamer_output or not streamer_output.ik_data: + return ax_line["left"], ax_line["right"] + + for dir in ["left", "right"]: + wrist_key = f"{dir}_wrist" + if wrist_key not in streamer_output.ik_data: + continue + + raw_pose = 
class FingersPreProcessor(PreProcessor):
    """Pass-through pre-processor that extracts one hand's finger data.

    ``__call__`` simply returns the ``"<side>_fingers"`` entry of the raw
    teleop payload; no transformation is applied.
    """

    def __init__(self, side: str):
        super().__init__()
        # "left" or "right"; selects which hand's data is extracted.
        self.side = side

    def __call__(self, data):
        key = f"{self.side}_fingers"
        return data[key]

    def calibrate(self, data, control_device):
        # TODO: calibrate max and min
        pass
class WristsPreProcessor(PreProcessor):
    """Maps raw teleop wrist poses into robot end-effector target poses.

    ``calibrate`` records an initial teleop pose per wrist and the robot's
    corresponding initial end-effector pose; ``__call__`` then expresses the
    teleop motion differentially in the end-effector frame and applies it
    (with scaled translation) on top of the robot's initial pose.
    """

    def __init__(self, motion_scale: float, **kwargs):
        super().__init__(**kwargs)
        self.motion_scale = motion_scale  # scale factor for robot motion
        self.calibration_ee_pose = {}  # elbow joint angles (deg) used for calibration
        self.ee_name_list = []  # end-effector frame names, e.g. "link_LArm7"
        self.robot_world_T_init_ee = {}  # initial end-effector pose of the robot
        self.init_teleop_T_teleop_world = {}  # inverse of each wrist's initial teleop pose
        self.init_teleop_T_init_ee = {}  # aligns teleop and end-effector local frames
        self.latest_data = None

    def calibrate(self, data, control_device):
        """Capture reference poses for every wrist present in ``data``.

        Fix: state is reset up front so calling ``calibrate`` more than once
        no longer accumulates duplicate end-effector entries.
        """
        self.ee_name_list = []
        self.calibration_ee_pose = {}
        self.robot_world_T_init_ee = {}
        self.init_teleop_T_teleop_world = {}
        self.init_teleop_T_init_ee = {}

        elbow_mapping = self.robot.supplemental_info.joint_name_mapping["elbow_pitch"]
        if "left_wrist" in data:
            self.ee_name_list.append(self.robot.supplemental_info.hand_frame_names["left"])
            self.calibration_ee_pose[elbow_mapping["left"]] = (
                self.robot.supplemental_info.elbow_calibration_joint_angles["left"]
            )
        if "right_wrist" in data:
            self.ee_name_list.append(self.robot.supplemental_info.hand_frame_names["right"])
            self.calibration_ee_pose[elbow_mapping["right"]] = (
                self.robot.supplemental_info.elbow_calibration_joint_angles["right"]
            )

        if self.calibration_ee_pose:
            # Evaluate FK at the calibration elbow angles, then restore.
            q = deepcopy(self.robot.q_zero)
            for joint, degree in self.calibration_ee_pose.items():
                q[self.robot.joint_to_dof_index[joint]] = np.deg2rad(degree)
            self.robot.cache_forward_kinematics(q, auto_clip=False)
            calibration_ee_poses = [
                self.robot.frame_placement(ee_name).np for ee_name in self.ee_name_list
            ]
            self.robot.reset_forward_kinematics()
        else:
            calibration_ee_poses = [
                self.robot.frame_placement(ee_name).np for ee_name in self.ee_name_list
            ]

        left_frame = self.robot.supplemental_info.hand_frame_names["left"]
        for ee_name, ee_pose in zip(self.ee_name_list, calibration_ee_poses):
            self.robot_world_T_init_ee[ee_name] = deepcopy(ee_pose)
            wrist_key = "left_wrist" if ee_name == left_frame else "right_wrist"
            self.init_teleop_T_teleop_world[ee_name] = np.linalg.inv(deepcopy(data[wrist_key]))

            # Initial teleop local frame is aligned with the initial end effector
            # local frame with hardcoded rotations since we don't have a common
            # reference frame for teleop and robot.
            self.init_teleop_T_init_ee[ee_name] = np.eye(4)
            if control_device == "pico":
                # TODO: add pico wrist calibration respect to the headset frame
                pass
            elif ee_name == left_frame:
                self.init_teleop_T_init_ee[ee_name][:3, :3] = (
                    self.robot.supplemental_info.hand_rotation_correction
                )
            else:
                self.init_teleop_T_init_ee[ee_name][:3, :3] = (
                    RIGHT_HAND_ROTATION @ self.robot.supplemental_info.hand_rotation_correction
                )

    def __call__(self, data) -> dict:
        """Map current teleop wrist poses to robot end-effector target poses."""
        processed_data = {}
        left_frame = self.robot.supplemental_info.hand_frame_names["left"]
        for ee_name in self.ee_name_list:
            # Select wrist data based on ee_name
            wrist_key = "left_wrist" if ee_name == left_frame else "right_wrist"
            teleop_world_T_final_teleop = data[wrist_key]
            init_teleop_T_final_teleop = (
                self.init_teleop_T_teleop_world[ee_name] @ teleop_world_T_final_teleop
            )
            # End effector differential transform is the teleop differential
            # transform expressed in the end effector frame (similarity transform).
            init_ee_T_final_ee = (
                np.linalg.inv(self.init_teleop_T_init_ee[ee_name])
                @ init_teleop_T_final_teleop
                @ self.init_teleop_T_init_ee[ee_name]
            )
            # Scale only the translation component.
            init_ee_T_final_ee[:3, 3] = self.motion_scale * init_ee_T_final_ee[:3, 3]
            processed_data[ee_name] = self.robot_world_T_init_ee[ee_name] @ init_ee_T_final_ee

        self.latest_data = processed_data
        return processed_data
class WeightedPostureTask(PostureTask):
    def __init__(
        self, cost: float, weights: np.ndarray, lm_damping: float = 0.0, gain: float = 1.0
    ) -> None:
        r"""Create weighted posture task.

        Args:
            cost: value used to cast joint angle differences to a homogeneous
                cost, in :math:`[\mathrm{cost}] / [\mathrm{rad}]`.
            weights: vector of per-joint weights applied to both the error and
                the Jacobian.
            lm_damping: Unitless scale of the Levenberg-Marquardt (only when
                the error is large) regularization term, which helps when
                targets are unfeasible. Increase this value if the task is too
                jerky under unfeasible targets, but beware that too large a
                damping can slow down the task.
            gain: Task gain :math:`\alpha \in [0, 1]` for additional low-pass
                filtering. Defaults to 1.0 (no filtering) for dead-beat
                control.
        """
        super().__init__(cost=cost, lm_damping=lm_damping, gain=gain)
        self.weights = weights

    def compute_error(self, configuration):
        """Posture error scaled element-wise by the joint weights."""
        error = super().compute_error(configuration)
        return self.weights * error

    def compute_jacobian(self, configuration):
        """Posture Jacobian with each row scaled by its joint weight."""
        # (Removed a leftover commented-out breakpoint() debug artifact here.)
        J = super().compute_jacobian(configuration)
        return self.weights[:, np.newaxis] * J

    def __repr__(self):
        """Human-readable representation of the weighted posture task."""
        return (
            "WeightedPostureTask("
            f"cost={self.cost}, "
            f"weights={self.weights}, "
            f"gain={self.gain}, "
            f"lm_damping={self.lm_damping})"
        )
class BodyIKSolver(Solver):
    """Differential IK solver for the robot body built on pink/pinocchio.

    Maintains per-link FrameTasks plus a posture regularization task, and
    integrates QP-computed joint velocities for a few steps per frame.
    """

    def __init__(self, ik_solver_settings: BodyIKSolverSettings):
        self.dt = ik_solver_settings.dt
        self.num_step_per_frame = ik_solver_settings.num_step_per_frame
        self.amplify_factor = ik_solver_settings.amplify_factor
        self.link_costs = ik_solver_settings.link_costs
        self.posture_weight = ik_solver_settings.posture_weight
        self.posture_cost = ik_solver_settings.posture_cost
        self.posture_lm_damping = ik_solver_settings.posture_lm_damping
        self.robot = None  # bound later via register_robot()

    def register_robot(self, robot):
        """Bind the robot model and build the tasks/configuration."""
        self.robot = robot
        self.initialize()

    def initialize(self):
        """Build the pink configuration and the frame/posture tasks."""
        # Prefer quadprog when installed; otherwise fall back to the first
        # available QP backend. (The original unconditional pre-assignment
        # was dead code.)
        if "quadprog" in qpsolvers.available_solvers:
            self.solver = "quadprog"
        else:
            self.solver = qpsolvers.available_solvers[0]

        # Default configuration: slight shoulder roll keeps the arms clear
        # of the torso.
        q_default = self.robot.q_zero.copy()
        q_default[self.robot.joint_to_dof_index["left_shoulder_roll_joint"]] = 0.2
        q_default[self.robot.joint_to_dof_index["right_shoulder_roll_joint"]] = -0.2

        self.configuration = pink.Configuration(
            self.robot.pinocchio_wrapper.model,
            self.robot.pinocchio_wrapper.data,
            q_default,
        )
        self.configuration.model.lowerPositionLimit = self.robot.lower_joint_limits
        self.configuration.model.upperPositionLimit = self.robot.upper_joint_limits

        # initialize tasks
        self.tasks = {}
        for link_name, weight in self.link_costs.items():
            assert link_name != "posture", "posture is a reserved task name"

            # Map robot-agnostic link names to robot-specific names
            if link_name == "hand":
                # "hand" expands into both robot-specific hand frames.
                for side in ["left", "right"]:
                    frame_name = self.robot.supplemental_info.hand_frame_names[side]
                    self.tasks[frame_name] = FrameTask(frame_name, **weight)
            else:
                # For other links, use the name directly
                self.tasks[link_name] = FrameTask(link_name, **weight)

        # add posture task
        if self.posture_weight is not None:
            weight = np.ones(self.robot.num_dofs)

            # Map robot-agnostic joint types to specific robot joint names
            # using supplemental info.
            for joint_type, posture_weight in self.posture_weight.items():
                if joint_type not in self.robot.supplemental_info.joint_name_mapping:
                    print(f"Warning: Unknown joint type {joint_type}")
                    continue

                joint_mapping = self.robot.supplemental_info.joint_name_mapping[joint_type]

                # Handle both single joint names and left/right mappings
                if isinstance(joint_mapping, str):
                    # Single joint (e.g., waist joints)
                    if joint_mapping in self.robot.joint_to_dof_index:
                        weight[self.robot.joint_to_dof_index[joint_mapping]] = posture_weight
                else:
                    # Left/right mapping (e.g., arm joints)
                    for side in ["left", "right"]:
                        joint_name = joint_mapping[side]
                        if joint_name in self.robot.joint_to_dof_index:
                            weight[self.robot.joint_to_dof_index[joint_name]] = posture_weight

            self.tasks["posture"] = WeightedPostureTask(
                cost=self.posture_cost,
                weights=weight,
                lm_damping=self.posture_lm_damping,
            )
        else:
            self.tasks["posture"] = PostureTask(
                cost=self.posture_cost, lm_damping=self.posture_lm_damping
            )

        for task in self.tasks.values():
            task.set_target_from_configuration(self.configuration)

    def __call__(self, target_pose: Dict):
        """Track the given {link_name: 4x4 pose} targets; returns the new q."""
        for link_name, pose in target_pose.items():
            if link_name not in self.tasks:
                continue  # silently ignore links without a task
            self.tasks[link_name].set_target(pin.SE3(pose[:3, :3], pose[:3, 3]))

        for _ in range(self.num_step_per_frame):
            velocity = solve_ik(
                self.configuration,
                self.tasks.values(),
                dt=self.dt,
                solver=self.solver,
            )
            self.configuration.q = self.robot.clip_configuration(
                self.configuration.q + velocity * self.dt * self.amplify_factor
            )
            self.configuration.update()
            self.robot.cache_forward_kinematics(self.configuration.q)

        return self.configuration.q.copy()

    def update_weights(self, weights):
        """Adjust position/orientation costs of existing frame tasks."""
        for link_name, weight in weights.items():
            task = self.tasks.get(link_name)
            if task is None:
                # Consistency fix: mirror __call__ and skip unknown links
                # instead of raising KeyError.
                continue
            if "position_cost" in weight:
                task.set_position_cost(weight["position_cost"])
            if "orientation_cost" in weight:
                task.set_orientation_cost(weight["orientation_cost"])
class G1GripperInverseKinematicsSolver(Solver):
    """Maps fingertip positions to G1 gripper joint targets.

    The "IK" is heuristic: whichever fingertip is pinched against the thumb
    selects one of several canned 7-DoF grasp configurations. The four
    per-finger poses are now built by a single parameterized helper instead
    of four near-identical copy-pasted methods.
    """

    def __init__(self, side) -> None:
        # Internal convention: "L" / "R"; right-hand outputs are mirrored.
        self.side = "L" if side.lower() == "left" else "R"

    def register_robot(self, robot):
        # The heuristic mapping needs no robot model.
        pass

    def __call__(self, finger_data):
        """Return a 7-vector of desired gripper joint angles.

        finger_data["position"] holds 25 fingertip 4x4 transforms; indices
        4, 9, 14, 19, 24 are used as the thumb/index/middle/ring/pinky tips
        (5 keypoints per finger).
        """
        fingertips = finger_data["position"]

        # Extract X, Y, Z positions from the transforms as an (N, 3) array.
        positions = np.array([finger[:3, 3] for finger in fingertips])
        positions = np.reshape(positions, (-1, 3))

        thumb_pos = positions[4, :]
        index_pos = positions[4 + 5, :]
        middle_pos = positions[4 + 10, :]
        ring_pos = positions[4 + 15, :]
        pinky_pos = positions[4 + 20, :]

        # A finger counts as "pinched" when its tip is within 5 cm of the thumb.
        dist_threshold = 0.05
        if np.linalg.norm(thumb_pos - index_pos) < dist_threshold:
            return self._get_index_close_q_desired()
        if np.linalg.norm(thumb_pos - middle_pos) < dist_threshold:
            return self._get_middle_close_q_desired()
        if np.linalg.norm(thumb_pos - ring_pos) < dist_threshold:
            return self._get_ring_close_q_desired()
        if np.linalg.norm(thumb_pos - pinky_pos) < dist_threshold:
            return self._get_pinky_close_q_desired()
        return np.zeros(7)

    def _grasp_q_desired(self, amp0, amp_a1, amp_a2):
        """Build one canned grasp configuration.

        Joints 1/2 (amp 0.7) and joints 4/6 (amp 1.5) are shared by every
        grasp; only amp0 (joint 0) and amp_a1/amp_a2 (joints 3/5) vary with
        the pinched finger.
        """
        q_desired = np.zeros(7)
        # Joint 0 sign depends on handedness before the final mirror.
        q_desired[0] = -amp0 if self.side == "L" else amp0
        q_desired[1] = 0.7
        q_desired[2] = 0.7
        q_desired[3] = -amp_a1
        q_desired[4] = -1.5
        q_desired[5] = -amp_a2
        q_desired[6] = -1.5
        # Right-hand joint targets are mirrored.
        return q_desired if self.side == "L" else -q_desired

    def _get_index_close_q_desired(self):
        return self._grasp_q_desired(0.5, 1.5, 0.6)

    def _get_middle_close_q_desired(self):
        return self._grasp_q_desired(0.0, 1.0, 1.0)

    def _get_ring_close_q_desired(self):
        return self._grasp_q_desired(-0.5, 0.6, 1.5)

    def _get_pinky_close_q_desired(self):
        # No canned grasp defined for the pinky yet — all-zero target.
        q_desired = np.zeros(7)
        return q_desired if self.side == "L" else -q_desired
class DummyStreamer(BaseStreamer):
    """A dummy streamer that returns hardcoded structured data for testing."""

    # Keyboard key -> fingertip block offset it toggles (index/middle/ring).
    _KEY_TO_FINGER_OFFSET = {"b": 5, "n": 10, "m": 15}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_streaming = False
        self.keyboard_listener = KeyboardListenerSubscriber()
        self.left_fingertips = np.zeros((25, 4, 4))
        self.right_fingertips = np.zeros((25, 4, 4))
        self.left_fingertips[4, 0, 3] = 1.0  # open
        self.right_fingertips[4, 0, 3] = 1.0  # open

    def start_streaming(self):
        self.is_streaming = True
        print("Dummy streamer started with hardcoded IK data")

    def get(self) -> StreamerOutput:
        """Return hardcoded dummy data with proper IK format."""
        if not self.is_streaming:
            return StreamerOutput()

        left_fingers, right_fingers = self._generate_finger_data()

        # Identity wrist poses; navigation/teleop commands are all neutral.
        return StreamerOutput(
            ik_data={
                "left_wrist": np.eye(4),
                "right_wrist": np.eye(4),
                "left_fingers": {"position": left_fingers},
                "right_fingers": {"position": right_fingers},
            },
            control_data={
                "navigate_cmd": [0.0, 0.0, 0.0],  # No movement
                "toggle_stand_command": False,
                "base_height_command": 0.78,  # Default standing height
            },
            teleop_data={
                "toggle_activation": False,
                "freeze_upper_body": False,
            },
            source="dummy",
        )

    def stop_streaming(self):
        self.is_streaming = False
        print("Dummy streamer stopped")

    def _generate_finger_data(self):
        """Toggle a finger's open/closed flag when its key is pressed.

        Fix: the three copy-pasted b/n/m branches are collapsed into one
        lookup-table toggle. Only the left hand is keyboard-driven; the
        right hand stays at its initial values.
        """
        key = self.keyboard_listener.read_msg()
        offset = self._KEY_TO_FINGER_OFFSET.get(key) if key is not None else None
        if offset is not None:
            row = 4 + offset
            # Flip the flag 1.0 <-> 0.0 (same semantics as the original
            # branches; note the original's open/closed comments were
            # inconsistent with __init__ — behavior kept as-is).
            self.left_fingertips[row, 0, 3] = (
                0.0 if self.left_fingertips[row, 0, 3] == 1.0 else 1.0
            )

        return self.left_fingertips, self.right_fingertips
def get_data_with_timeout(obj, timeout=0.02):
    """Request vive data from ``obj.data_collect``, giving up after ``timeout`` s.

    Returns the data on success, or None on timeout.

    Fix: the original used ``with ThreadPoolExecutor()``, whose __exit__ calls
    shutdown(wait=True) — so when the request hung, the function still blocked
    until the worker thread finished, defeating the timeout entirely. We now
    shut the executor down with wait=False so the caller returns promptly.
    """
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    try:
        future = executor.submit(obj.data_collect.request_vive_data)
        try:
            return future.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
            print("Data request timed out.")
            return None
    finally:
        # Do not wait for a potentially hung worker; it finishes in the
        # background and is reaped at interpreter shutdown.
        executor.shutdown(wait=False)
def get_transformation(vive_raw_data):
    """
    Turn the raw data from the Vive tracker into a transformation matrix.
    """
    pos = vive_raw_data["position"]
    ori = vive_raw_data["orientation"]
    # scipy expects quaternions in (x, y, z, w) order.
    T = np.eye(4)
    T[:3, :3] = R.from_quat([ori["x"], ori["y"], ori["z"], ori["w"]]).as_matrix()
    T[:3, 3] = [pos["x"], pos["y"], pos["z"]]
    return T
class IphoneStreamer(BaseStreamer):
    """Streams wrist poses and button-driven commands from two iPhones.

    Left device: port 5557; right device: port 5558. Reset buttons act as
    rising-edge triggers (only the False->True transition fires).
    """

    def __init__(self):
        self.left_device = IPhoneDevice(port=5557)
        self.right_device = IPhoneDevice(port=5558)
        self.left_prev_button_states = {"Reset": False, "Close": False}
        self.right_prev_button_states = {"Reset": False, "Close": False}

    def start_streaming(self):
        self.left_device.start()
        self.right_device.start()

    def __del__(self):
        self.stop_streaming()

    @staticmethod
    def _block_until_data(device, label):
        """Poll one device until it returns a command payload."""
        payload = None
        while not payload:
            payload = device.get_cmd()
            if not payload:
                print(f"Waiting for {label} iPhone data...")
        return payload

    @staticmethod
    def _fingertips_for(closed):
        """Build a dummy (25, 4, 4) fingertip array; entry [4, 0, 3] encodes
        open (1) vs closed (0)."""
        tips = np.zeros([25, 4, 4])
        tips[4, 0, 3] = 0 if closed else 1
        return tips

    def get(self):
        """Request combined data and return transformations as StreamerOutput."""
        ik_data = {}  # pose and joint data (ik_keys)
        control_data = {}  # robot control commands (control_keys)
        teleop_data = {}  # internal policy commands (teleop_keys)

        try:
            # Block until both phones have produced data.
            left = self._block_until_data(self.left_device, "left")
            right = self._block_until_data(self.right_device, "right")

            ik_data["left_wrist"] = np.array(left.get("transformMatrix"))
            ik_data["right_wrist"] = np.array(right.get("transformMatrix"))

            # Left buttons: rising-edge detect Reset, remember both states.
            left_reset_now = left.get("buttonStates").get("Reset")
            left_close_now = left.get("buttonStates").get("Close")
            left_reset = left_reset_now and not self.left_prev_button_states["Reset"]
            self.left_prev_button_states["Reset"] = left_reset_now
            self.left_prev_button_states["Close"] = left_close_now
            ik_data["left_fingers"] = {"position": self._fingertips_for(left_close_now)}

            # Right buttons: same edge-trigger logic.
            right_reset_now = right.get("buttonStates").get("Reset")
            right_close_now = right.get("buttonStates").get("Close")
            right_reset = right_reset_now and not self.right_prev_button_states["Reset"]
            self.right_prev_button_states["Reset"] = right_reset_now
            self.right_prev_button_states["Close"] = right_close_now
            ik_data["right_fingers"] = {"position": self._fingertips_for(right_close_now)}

            # Left Reset toggles teleop activation; right Reset toggles stand.
            teleop_data["toggle_activation"] = left_reset
            control_data["toggle_stand_command"] = right_reset
        except Exception as e:
            print(f"Error while requesting iPhone data: {e}")

        return StreamerOutput(
            ik_data=ik_data, control_data=control_data, teleop_data=teleop_data, source="iphone"
        )

    def stop_streaming(self):
        self.left_device.stop()
        self.right_device.stop()
class JoyConDevice:
    """
    A class to handle a single Joy-Con controller using evdev.
    Supports both individual Joy-Cons and their IMU sensors.
    """

    def __init__(
        self, device_path: str = None, controller_type: str = "auto", silent: bool = False
    ):
        """
        Initialize the Joy-Con device.

        Args:
            device_path: Path to the Joy-Con event device (e.g., /dev/input/event25)
            controller_type: "left", "right", or "auto"
            silent: Whether to suppress output messages
        """
        self._silent = silent
        self._device_path = device_path
        self._controller_type = controller_type
        self._device = None
        self._prev_button_states = {}
        # Per-button previous "pressed" state for edge detection.
        # Fix: stored in a dict instead of dynamic setattr/getattr attributes.
        self._prev_press = {}

        # Joy-Con button mappings (evdev key codes)
        self._setup_mappings()

        # Current analog stick values
        self._stick_x = 0.0
        self._stick_y = 0.0

        # Axis codes (overridden in start() based on controller type)
        self._x_axis_code = 0  # Default for left Joy-Con
        self._y_axis_code = 1  # Default for left Joy-Con

    def _setup_mappings(self):
        """Set up button and axis mappings for Joy-Con event codes."""
        # Left Joy-Con button mappings
        self._left_button_mappings = {
            544: "dpad_up",
            545: "dpad_down",
            546: "dpad_left",
            547: "dpad_right",
            309: "capture",
            314: "minus",
            310: "L",  # L button
            312: "ZL",  # ZL trigger
            311: "SL",  # SL side button
            313: "SR",  # SR side button
        }

        # Right Joy-Con button mappings
        self._right_button_mappings = {
            304: "a",
            305: "b",
            307: "x",
            308: "y",
            315: "plus",
            316: "home",
            # NOTE(review): the R/ZR/SL/SR comments in the original source
            # contradicted the names; codes kept exactly as-is.
            311: "R",
            313: "ZR",
            310: "SL",
            312: "SR",
        }

        # Active mapping is selected in start() based on the detected type.
        self._button_mappings = {}
        self._prev_button_states = {}

        # Axis codes (set dynamically in start())
        self._x_axis_code = 0
        self._y_axis_code = 1

    def _find_joycon_device(self):
        """Find and return a Joy-Con device, waiting until one is found."""
        while True:
            devices = [InputDevice(path) for path in list_devices()]
            joycon_devices = [
                device
                for device in devices
                if "Joy-Con" in device.name and "IMU" not in device.name
            ]

            if not joycon_devices:
                if not self._silent:
                    print("Warning: No Joy-Con devices found. Waiting for Joy-Con connection...")
                time.sleep(1.0)  # Wait 1 second before checking again
                continue

            # If a specific side was requested, only accept that side.
            if self._controller_type == "left":
                for device in joycon_devices:
                    if "Joy-Con (L)" in device.name:
                        if not self._silent:
                            print(f"Found requested left Joy-Con: {device.name}")
                        return device
                if not self._silent:
                    print("Warning: Left Joy-Con requested but not found. Waiting...")
                time.sleep(1.0)
                continue

            elif self._controller_type == "right":
                for device in joycon_devices:
                    if "Joy-Con (R)" in device.name:
                        if not self._silent:
                            print(f"Found requested right Joy-Con: {device.name}")
                        return device
                if not self._silent:
                    print("Warning: Right Joy-Con requested but not found. Waiting...")
                time.sleep(1.0)
                continue

            # Auto mode or fallback - return first available
            if not self._silent:
                print(f"Found Joy-Con in auto mode: {joycon_devices[0].name}")
            return joycon_devices[0]

    def start(self):
        """Open the device and configure mappings/axes for its detected side."""
        try:
            if self._device_path:
                # Use the explicitly provided device path
                self._device = InputDevice(self._device_path)
            else:
                # Auto-find Joy-Con device (blocks until one appears)
                self._device = self._find_joycon_device()

            # Determine actual controller type and axis codes from device name
            if "Joy-Con (L)" in self._device.name:
                self._controller_type = "left"
                self._x_axis_code = 0
                self._y_axis_code = 1
                self._button_mappings = self._left_button_mappings.copy()
            elif "Joy-Con (R)" in self._device.name:
                self._controller_type = "right"
                self._x_axis_code = 3  # Right Joy-Con uses ABS_RX (3)
                self._y_axis_code = 4  # Right Joy-Con uses ABS_RY (4)
                self._button_mappings = self._right_button_mappings.copy()
            else:
                # Unknown device: keep defaults and assume left mappings.
                if not self._silent:
                    print(f"Warning: Unknown Joy-Con type: {self._device.name}")
                self._button_mappings = self._left_button_mappings.copy()

            # Initialize button states
            self._prev_button_states = {name: False for name in self._button_mappings.values()}

            if not self._silent:
                print(f"Joy-Con connected: {self._device.name}")
                print(f"Device path: {self._device.path}")
                print(f"Controller type: {self._controller_type}")
                print(f"Axis codes: X={self._x_axis_code}, Y={self._y_axis_code}")
                print(f"Button mappings: {list(self._button_mappings.values())}")
                print(f"Capabilities: {list(self._device.capabilities(verbose=True).keys())}")

        except Exception as e:
            if not self._silent:
                print(f"Failed to initialize Joy-Con: {e}")
            raise e

    def stop(self):
        """Close the device; safe to call repeatedly."""
        if self._device:
            try:
                self._device.close()
            except (OSError, AttributeError):
                pass  # Ignore errors during cleanup
            # Fix: drop the handle so later get_state() calls return {}
            # instead of touching a closed device.
            self._device = None
        if not self._silent:
            print("Joy-Con disconnected.")

    def get_state(self) -> Dict[str, Any]:
        """Drain pending input events and return the current state snapshot."""
        if not self._device:
            return {}

        try:
            # Local import kept from the original; select is only needed here.
            import select

            # Non-blocking poll; we only care about the readable set.
            ready, _, _ = select.select([self._device], [], [], 0)
            if ready:
                # Read ALL events available right now
                for event in self._device.read():
                    self._process_event(event)

            return self._build_state()

        except Exception as e:
            if not self._silent:
                print(f"Error reading Joy-Con state: {e}")
            return {}

    def _process_event(self, event):
        """Process a single input event from the Joy-Con."""
        if event.type == ecodes.EV_KEY:
            # Button event
            button_name = self._button_mappings.get(event.code)
            if button_name:
                self._prev_button_states[button_name] = event.value == 1

        elif event.type == ecodes.EV_ABS:
            # Analog stick event - use the dynamically selected axis codes.
            if event.code == self._x_axis_code:
                self._stick_x = event.value / 32767.0
            elif event.code == self._y_axis_code:
                # Y axis is inverted so "up" is positive.
                self._stick_y = -event.value / 32767.0

    def _build_state(self):
        """Build the current state dict: buttons, press events, axes, timestamp."""
        if not self._device:
            return {}

        buttons = {}
        button_events = {}

        for button_name in self._button_mappings.values():
            current_state = self._prev_button_states.get(button_name, False)
            buttons[button_name] = current_state

            # Detect button press events (transition from False to True).
            prev_state = self._prev_press.get(button_name, False)
            button_events[f"{button_name}_pressed"] = current_state and not prev_state
            self._prev_press[button_name] = current_state

        return {
            "buttons": buttons,
            "button_events": button_events,
            "axes": {"stick_x": self._stick_x, "stick_y": self._stick_y},
            "timestamp": time.time(),
        }
"stick_y": self._stick_y}, + "timestamp": time.time(), + } + + +class JoyconStreamer(BaseStreamer): + """ + A streamer for Nintendo Joy-Con controllers following iPhone streamer pattern. + Supports two modes: locomotion and manipulation. + """ + + def __init__( + self, left_device_path: str = None, right_device_path: str = None, silent: bool = False + ): + """Initialize the Joy-Con streamer.""" + super().__init__() + + self._silent = silent + self.latest_data = {} + + # Mode management + self.reset_status() + + # Initialize devices + self.left_device = JoyConDevice(left_device_path, "left", silent) + self.right_device = JoyConDevice(right_device_path, "right", silent) + + def reset_status(self): + """Reset the cache of the streamer.""" + self.current_base_height = 0.74 # Initial base height, 0.74m (standing height) + self.stand_toggle_cooldown = 0.5 # prevent rapid stand toggling + self.last_stand_toggle_time = 0 + + def start_streaming(self): + """Start streaming from Joy-Con devices.""" + try: + if not self._silent: + print("Starting Joy-Con devices...") + + # Start devices and wait until both are connected + self.left_device.start() + self.right_device.start() + + # Wait until both devices are ready + max_wait_time = 10 # seconds + start_time = time.time() + while (time.time() - start_time) < max_wait_time: + left_state = self.left_device.get_state() + right_state = self.right_device.get_state() + + if left_state and right_state: + if not self._silent: + print("Both Joy-Con devices connected successfully!") + break + + if not self._silent: + print("Waiting for both Joy-Con devices to connect...") + time.sleep(0.5) + else: + print("Warning: Timeout waiting for both Joy-Con devices") + + if not self._silent: + print("Joy-Con streamer started in unified mode") + print("Controls:") + print(" ZL+ZR: Toggle stand command") + print(" D-pad Up/Down: Increase/Decrease base height") + print(" Capture (Left): Toggle locomotion policy (e-stop for lower body)") + print(" L/R 
shoulders: Left/Right finger open/close") + print(" Home button: Toggle activation") + print(" Left stick: Forward/backward and strafe movement") + print(" Right stick: Yaw rotation") + print(" A button: Toggle data collection") + print(" B button: Abort current episode") + + except Exception as e: + if not self._silent: + print(f"Failed to start Joy-Con streaming: {e}") + raise e + + def stop_streaming(self): + """Stop streaming from Joy-Con devices.""" + if self.left_device: + self.left_device.stop() + if self.right_device: + self.right_device.stop() + + if not self._silent: + print("Joy-Con streaming stopped") + + def _get_joycon_state(self): + """ + Get combined Joy-Con state with error handling. + DDA: Warning and wait until we got all left and right device data. + """ + left_state = self.left_device.get_state() if self.left_device else {} + right_state = self.right_device.get_state() if self.right_device else {} + + # Check if we have valid data from both devices + if not left_state or not right_state: + if not self._silent: + missing = [] + if not left_state: + missing.append("left") + if not right_state: + missing.append("right") + print(f"Warning: Missing Joy-Con data from {', '.join(missing)} device(s)") + + # Return empty data structure if either device is missing + return { + "left_button_states": {}, + "right_button_states": {}, + "left_stick_inputs": {}, + "right_stick_inputs": {}, + } + + # Combine states + combined_data = { + "left_button_states": left_state.get("buttons", {}), + "right_button_states": right_state.get("buttons", {}), + "left_stick_inputs": left_state.get("axes", {}), + "right_stick_inputs": right_state.get("axes", {}), + "left_button_events": left_state.get("button_events", {}), + "right_button_events": right_state.get("button_events", {}), + } + return combined_data + + def _handle_stand_toggle(self, joycon_data): + """Handle stand toggle command via ZL+ZR buttons.""" + current_time = time.time() + if (current_time - 
self.last_stand_toggle_time) > self.stand_toggle_cooldown: + # Use button states to check if both triggers are currently pressed + left_buttons = joycon_data.get("left_button_states", {}) + right_buttons = joycon_data.get("right_button_states", {}) + + # Use button events to detect if at least one trigger was just pressed + left_button_events = joycon_data.get("left_button_events", {}) + right_button_events = joycon_data.get("right_button_events", {}) + + # Check if both triggers are held AND at least one was just pressed + both_triggers_held = left_buttons.get("ZL", False) and right_buttons.get("ZR", False) + at_least_one_just_pressed = left_button_events.get( + "ZL_pressed", False + ) or right_button_events.get("ZR_pressed", False) + + if both_triggers_held and at_least_one_just_pressed: + self.last_stand_toggle_time = current_time + if not self._silent: + print("Stand toggle activated") + return True + return False + + def _apply_dead_zone(self, value, dead_zone): + """Apply dead zone and normalize.""" + if abs(value) < dead_zone: + return 0.0 + sign = 1 if value > 0 else -1 + # Normalize the output to be between -1 and 1 after dead zone + return sign * (abs(value) - dead_zone) / (1.0 - dead_zone) + + def _handle_height_adjustment(self, joycon_data): + """Handle base height adjustment via d-pad up/down.""" + left_button_states = joycon_data.get("left_button_states", {}) + + # Check d-pad button states + dpad_up = left_button_states.get("dpad_up", False) + dpad_down = left_button_states.get("dpad_down", False) + + # Incremental height adjustment + height_increment = 0.005 # Small step per call when button is pressed + + if dpad_up: + self.current_base_height += height_increment + elif dpad_down: + self.current_base_height -= height_increment + + # Clamp to bounds + self.current_base_height = np.clip(self.current_base_height, 0.2, 0.74) + + def _detect_stand_toggle(self, joycon_data): + """Detect stand/walk toggle command - triggered by ZL+ZR.""" + return 
self._handle_stand_toggle(joycon_data) + + def _detect_locomotion_policy_toggle(self, joycon_data): + """Detect locomotion policy toggle command using left Joy-Con capture button.""" + left_button_events = joycon_data.get("left_button_events", {}) + return left_button_events.get("capture_pressed", False) + + def _detect_emergency_stop(self, joycon_data): + """Detect emergency stop command (Placeholder - not implemented yet).""" + return False # Not implemented yet + + def _detect_data_collection_toggle(self, joycon_data): + """Detect data collection toggle command using right Joy-Con A button.""" + right_button_events = joycon_data.get("right_button_events", {}) + return right_button_events.get("a_pressed", False) + + def _detect_abort_toggle(self, joycon_data): + """Detect abort toggle command using right Joy-Con B button.""" + right_button_events = joycon_data.get("right_button_events", {}) + return right_button_events.get("b_pressed", False) + + def _generate_finger_data(self, shoulder_button_pressed): + """Generate finger position data similar to iPhone streamer.""" + fingertips = np.zeros([25, 4, 4]) + + # Set identity matrices for all finger joints + for i in range(25): + fingertips[i] = np.eye(4) + + # Control thumb based on shoulder button state (index 4 is thumb tip) + if shoulder_button_pressed: + fingertips[4, 0, 3] = 0.0 # closed + else: + fingertips[4, 0, 3] = 1.0 # open + + return fingertips + + def _generate_unified_raw_data(self, joycon_data): + """Generate unified raw_data combining navigation, finger control, and height adjustment.""" + + # Extract stick inputs + left_stick = joycon_data.get("left_stick_inputs", {}) + right_stick = joycon_data.get("right_stick_inputs", {}) + + # Extract button/trigger states for finger control + left_buttons = joycon_data.get("left_button_states", {}) + right_buttons = joycon_data.get("right_button_states", {}) + + # Handle d-pad height adjustment + self._handle_height_adjustment(joycon_data) + + # Map to velocity 
commands with dead zones and scaling + DEAD_ZONE = 0.1 + MAX_LINEAR_VEL = 0.2 # m/s + MAX_ANGULAR_VEL = 0.5 # rad/s + + # Left stick Y for forward/backward (lin_vel_x), X for strafe (lin_vel_y). + # Right stick X for yaw (ang_vel_z). Verify command signs. + fwd_bwd_input = left_stick.get("stick_y", 0.0) + strafe_input = -left_stick.get("stick_x", 0.0) # Flip sign for intuitive left/right + yaw_input = -right_stick.get("stick_x", 0.0) + + lin_vel_x = self._apply_dead_zone(fwd_bwd_input, DEAD_ZONE) * MAX_LINEAR_VEL + lin_vel_y = self._apply_dead_zone(strafe_input, DEAD_ZONE) * MAX_LINEAR_VEL # Strafe + ang_vel_z = self._apply_dead_zone(yaw_input, DEAD_ZONE) * MAX_ANGULAR_VEL + + # Extract home button press event for toggle activation + right_button_events = joycon_data.get("right_button_events", {}) + home_button_pressed = right_button_events.get("home_pressed", False) + + # Map L/R (shoulder buttons) to finger control + left_shoulder_pressed = left_buttons.get("L", False) + right_shoulder_pressed = right_buttons.get("R", False) + + # Generate finger data based on shoulder button states + left_fingers = self._generate_finger_data(left_shoulder_pressed) + right_fingers = self._generate_finger_data(right_shoulder_pressed) + + return StreamerOutput( + ik_data={ + "left_fingers": {"position": left_fingers}, + "right_fingers": {"position": right_fingers}, + }, + control_data={ + "base_height_command": self.current_base_height, + "navigate_cmd": [lin_vel_x, lin_vel_y, ang_vel_z], + "toggle_stand_command": self._detect_stand_toggle(joycon_data), + "toggle_policy_action": self._detect_locomotion_policy_toggle(joycon_data), + }, + teleop_data={ + "toggle_activation": home_button_pressed, + }, + data_collection_data={ + "toggle_data_collection": self._detect_data_collection_toggle(joycon_data), + "toggle_data_abort": self._detect_abort_toggle(joycon_data), + }, + source="joycon", + ) + + def get(self) -> StreamerOutput: + """ + Return StreamerOutput with unified control data. 
+ """ + # Get current Joy-Con state + joycon_data = self._get_joycon_state() + + # Generate unified structured output + raw_data = self._generate_unified_raw_data(joycon_data) + return raw_data + + def __del__(self): + """Cleanup when the streamer is destroyed.""" + self.stop_streaming() + + +def debug_joycons(controller_type="both", duration=30): + """Debug function to show Joy-Con devices and capture real-time events.""" + print(f"\n=== JOY-CON DEBUG ({controller_type.upper()}) ===") + + # Show available devices + devices = [InputDevice(path) for path in list_devices()] + joycon_devices = [d for d in devices if "Joy-Con" in d.name and "IMU" not in d.name] + print(f"Found {len(joycon_devices)} Joy-Con devices:") + for device in joycon_devices: + print(f" {device.name} at {device.path}") + + if not joycon_devices: + print("No Joy-Con devices found!") + return + + # Select devices to monitor + devices_to_monitor = [] + if controller_type in ["left", "both"]: + devices_to_monitor.extend([d for d in joycon_devices if "Joy-Con (L)" in d.name]) + if controller_type in ["right", "both"]: + devices_to_monitor.extend([d for d in joycon_devices if "Joy-Con (R)" in d.name]) + + if not devices_to_monitor: + print(f"No {controller_type} Joy-Con found!") + return + + print(f"\nMonitoring events for {duration}s (Press Ctrl+C to stop)...") + + import select + + start_time = time.time() + try: + while (time.time() - start_time) < duration: + r, w, x = select.select(devices_to_monitor, [], [], 0.1) + for device in r: + events = device.read() + for event in events: + if event.type in [ecodes.EV_KEY, ecodes.EV_ABS]: + event_type = "KEY" if event.type == ecodes.EV_KEY else "ABS" + device_name = "L" if "Joy-Con (L)" in device.name else "R" + print(f"[{device_name}] {event_type}:{event.code}:{event.value}") + except KeyboardInterrupt: + print("\nStopped.") + finally: + for device in devices_to_monitor: + device.close() + + print("=== END DEBUG ===\n") + + +def test_unified_controls(): + 
"""Test unified control system with all functionality available.""" + print("=== Testing Unified Controls ===") + print("Controls:") + print(" ZL+ZR: Toggle stand command") + print(" D-pad Up/Down: Increase/Decrease base height") + print(" Capture (Left): Toggle locomotion policy (e-stop for lower body)") + print(" Left stick: Forward/backward and strafe movement") + print(" Right stick: Yaw rotation") + print(" L/R shoulders: Left/Right finger open/close") + print(" Home button: Toggle activation") + print(" A button: Toggle data collection") + print(" B button: Abort current episode") + print("Press Ctrl+C to stop\n") + + streamer = JoyconStreamer(silent=False) + + try: + streamer.start_streaming() + + while True: + data = streamer.get() + + # Extract all control data + nav_cmd = data.control_data.get("navigate_cmd", [0, 0, 0]) + height_cmd = data.control_data.get("base_height_command", 0.78) + stand_toggle = data.control_data.get("toggle_stand_command", False) + policy_action = data.control_data.get("toggle_policy_action", False) + toggle_activation = data.teleop_data.get("toggle_activation", False) + toggle_data_collection = data.data_collection_data.get("toggle_data_collection", False) + toggle_data_abort = data.data_collection_data.get("toggle_data_abort", False) + + # Get finger states + left_fingers = data.ik_data.get("left_fingers", {}).get( + "position", np.zeros([25, 4, 4]) + ) + right_fingers = data.ik_data.get("right_fingers", {}).get( + "position", np.zeros([25, 4, 4]) + ) + left_closed = ( + left_fingers[4, 0, 3] == 0.0 if left_fingers.shape == (25, 4, 4) else False + ) + right_closed = ( + right_fingers[4, 0, 3] == 0.0 if right_fingers.shape == (25, 4, 4) else False + ) + + print( + f"UNIFIED - Nav:[{nav_cmd[0]:.2f},{nav_cmd[1]:.2f},{nav_cmd[2]:.2f}] " + f"Height:{height_cmd:.2f} L:{left_closed} R:{right_closed} " + f"StandToggle:{stand_toggle} PolicyAction:{policy_action} " + f"ToggleActivation:{toggle_activation} " + 
f"ToggleDataCollection:{toggle_data_collection} ToggleDataAbort:{toggle_data_abort}" + ) + + time.sleep(0.1) + + except KeyboardInterrupt: + print("\nStopping unified controls test...") + finally: + streamer.stop_streaming() + + +if __name__ == "__main__": + import sys + + # Check for debug mode + if len(sys.argv) > 1 and sys.argv[1] == "debug_both": + debug_joycons(controller_type="both", duration=60) + sys.exit(0) + + # Check for test mode + if len(sys.argv) > 1 and sys.argv[1] == "test_unified": + test_unified_controls() + sys.exit(0) + + # Default: show usage and run unified controls test + print("Joy-Con Teleop Streamer - Unified Mode") + print("Usage:") + print(" python joycon_streamer.py debug_both - Debug both Joy-Con devices") + print(" python joycon_streamer.py test_unified - Test unified controls") + print() + print("Unified Button Mapping:") + print("| Input | Function |") + print("|-------|----------|") + print("| Left Stick Y | Forward/Backward movement |") + print("| Left Stick X | Strafe Left/Right |") + print("| Right Stick X | Yaw rotation |") + print("| D-pad Up | Increase base height |") + print("| D-pad Down | Decrease base height |") + print("| L (Left Shoulder) | Left finger open/close |") + print("| R (Right Shoulder) | Right finger open/close |") + print("| Home (Right Joy-Con) | Toggle activation |") + print("| Capture (Left Joy-Con) | Toggle locomotion policy (e-stop) |") + print("| ZL+ZR (Triggers) | Toggle stand command |") + print("| A (Right Joy-Con) | Start/Stop Data Collection |") + print("| B (Right Joy-Con) | Abort Current Episode |") + print() + print("Running unified controls test by default...") + print() + + test_unified_controls() diff --git a/decoupled_wbc/control/teleop/streamers/leapmotion_streamer.py b/decoupled_wbc/control/teleop/streamers/leapmotion_streamer.py new file mode 100644 index 0000000..5025759 --- /dev/null +++ b/decoupled_wbc/control/teleop/streamers/leapmotion_streamer.py @@ -0,0 +1,289 @@ +from copy import 
deepcopy +import pickle +import queue +import threading +import time +from typing import Dict, List, Tuple + +import leap +import leap.events +import numpy as np +from scipy.spatial.transform import Rotation as R + +from decoupled_wbc.control.teleop.streamers.base_streamer import BaseStreamer, StreamerOutput + + +def xyz2np_array(position: leap.datatypes.Vector) -> np.ndarray: + return np.array([position.x, position.y, position.z]) + + +def quat2np_array(quaternion: leap.datatypes.Quaternion) -> np.ndarray: + return np.array([quaternion.x, quaternion.y, quaternion.z, quaternion.w]) + + +def get_raw_finger_points(hand: leap.datatypes.Hand) -> np.ndarray: + target_points = np.array([np.eye(4) for _ in range(25)]) + target_points[0, :3, :3] = R.from_quat(quat2np_array(hand.palm.orientation)).as_matrix() + target_points[0, :3, 3] = xyz2np_array(hand.arm.next_joint) + target_points[1, :3, 3] = xyz2np_array(hand.thumb.bones[0].next_joint) + target_points[2, :3, 3] = xyz2np_array(hand.thumb.bones[1].next_joint) + target_points[3, :3, 3] = xyz2np_array(hand.thumb.bones[2].next_joint) + target_points[4, :3, 3] = xyz2np_array(hand.thumb.bones[3].next_joint) + target_points[5, :3, 3] = xyz2np_array(hand.index.bones[0].prev_joint) + target_points[6, :3, 3] = xyz2np_array(hand.index.bones[0].next_joint) + target_points[7, :3, 3] = xyz2np_array(hand.index.bones[1].next_joint) + target_points[8, :3, 3] = xyz2np_array(hand.index.bones[2].next_joint) + target_points[9, :3, 3] = xyz2np_array(hand.index.bones[3].next_joint) + target_points[10, :3, 3] = xyz2np_array(hand.middle.bones[0].prev_joint) + target_points[11, :3, 3] = xyz2np_array(hand.middle.bones[0].next_joint) + target_points[12, :3, 3] = xyz2np_array(hand.middle.bones[1].next_joint) + target_points[13, :3, 3] = xyz2np_array(hand.middle.bones[2].next_joint) + target_points[14, :3, 3] = xyz2np_array(hand.middle.bones[3].next_joint) + target_points[15, :3, 3] = xyz2np_array(hand.ring.bones[0].prev_joint) + target_points[16, 
:3, 3] = xyz2np_array(hand.ring.bones[0].next_joint) + target_points[17, :3, 3] = xyz2np_array(hand.ring.bones[1].next_joint) + target_points[18, :3, 3] = xyz2np_array(hand.ring.bones[2].next_joint) + target_points[19, :3, 3] = xyz2np_array(hand.ring.bones[3].next_joint) + target_points[20, :3, 3] = xyz2np_array(hand.pinky.bones[0].prev_joint) + target_points[21, :3, 3] = xyz2np_array(hand.pinky.bones[0].next_joint) + target_points[22, :3, 3] = xyz2np_array(hand.pinky.bones[1].next_joint) + target_points[23, :3, 3] = xyz2np_array(hand.pinky.bones[2].next_joint) + target_points[24, :3, 3] = xyz2np_array(hand.pinky.bones[3].next_joint) + + return target_points + + +def get_fake_finger_points_from_pinch(hand: leap.datatypes.Hand) -> np.ndarray: + # print(hand.pinch_strength) + target_points = np.array([np.eye(4) for _ in range(25)]) + for i in range(25): + target_points[i] = np.eye(4) + + # Control thumb based on shoulder button state (index 4 is thumb tip) + if hand.pinch_strength > 0.3: + target_points[4, 0, 3] = 0.0 # closed + else: + target_points[4, 0, 3] = 1000.0 # open, in mm + + return target_points + + +def get_raw_wrist_pose(hand: leap.datatypes.Hand) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + hand_palm_position = xyz2np_array(hand.palm.position) + hand_palm_normal = np.array([-hand.palm.normal.z, -hand.palm.normal.x, hand.palm.normal.y]) + hand_palm_direction = np.array( + [-hand.palm.direction.z, -hand.palm.direction.x, hand.palm.direction.y] + ) + + return hand_palm_position, hand_palm_normal, hand_palm_direction + + +def get_finger_transform(target_points: np.ndarray, hand_type: str) -> np.ndarray: + pose_palm_root = target_points[0, :3, 3].copy() + rot_palm = target_points[0, :3, :3].copy() + + if hand_type == "right": + rot_leap2base = R.from_euler("z", [180], degrees=True).as_matrix() + rot_reverse = np.array( + [[[0, 0, 1], [0, -1, 0], [1, 0, 0]]] + ) # due to the target_base_rotation in hand IK solver + else: + rot_leap2base = np.eye(3) + 
rot_reverse = np.array([[[0, 0, -1], [0, -1, 0], [-1, 0, 0]]]) + offset = (target_points[:, :3, 3] - pose_palm_root).copy() / 1000.0 # mm to m + offset = offset @ rot_palm @ rot_leap2base @ rot_reverse + + target_points[:, :3, 3] = offset + + return target_points + + +def get_wrist_transformation( + hand_palm_position: np.ndarray, + hand_palm_normal: np.ndarray, + hand_palm_direction: np.ndarray, + hand_type: str, + pos_sensitivity: float = 1.0 / 800.0, +) -> np.ndarray: + T = np.eye(4) + T[:3, 3] = hand_palm_position[[2, 0, 1]] * pos_sensitivity * np.array([-1, -1, 1]) + direction_np = hand_palm_direction + palm_normal_np = hand_palm_normal + if hand_type == "left": + transform = R.from_euler("y", -90, degrees=True).as_matrix() + lh_thumb_np = -np.cross(direction_np, palm_normal_np) + rotation_matrix = np.array([direction_np, -palm_normal_np, lh_thumb_np]).T + else: + transform = R.from_euler("xy", [-180, 90], degrees=True).as_matrix() + rh_thumb_np = np.cross(direction_np, palm_normal_np) + rotation_matrix = np.array([direction_np, palm_normal_np, rh_thumb_np]).T + T[:3, :3] = np.dot(rotation_matrix, transform) + return T + + +def get_raw_data( + hands: List[leap.datatypes.Hand], data: Dict[str, np.ndarray] = None +) -> Dict[str, np.ndarray]: + if data is None: + data = {} + for hand in hands: + hand_type = "left" if str(hand.type) == "HandType.Left" else "right" + data[hand_type + "_wrist"] = get_raw_wrist_pose(hand) + # @runyud: this is a hack to get the finger points from the pinch strength + data[hand_type + "_fingers"] = get_fake_finger_points_from_pinch(hand) + assert len(data) == 4, f"Leapmotiondata length should be 4, but got {len(data)}" + return data + + +def process_data(raw_data: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + data = {} + for hand_type in ["left", "right"]: + data[hand_type + "_wrist"] = get_wrist_transformation( + *raw_data[hand_type + "_wrist"], hand_type + ) + data[hand_type + "_fingers"] = { + "position": 
get_finger_transform(raw_data[hand_type + "_fingers"], hand_type) + } + return data + + +class LeapMotionListener(leap.Listener): + def __init__(self, verbose=False): + self.data = None + self.data_lock = threading.Lock() + self.verbose = verbose + + def on_connection_event(self, event): + print("Connected") + + def on_device_event(self, event: leap.events.DeviceEvent): + try: + with event.device.open(): + info = event.device.get_info() + except AttributeError: + # Handle the case where LeapCannotOpenDeviceError is not available + try: + info = event.device.get_info() + except Exception as e: + print(f"Error opening device: {e}") + info = None + + print(f"Found Leap Motion device {info.serial}") + + def on_tracking_event(self, event: leap.events.TrackingEvent): + if ( + len(event.hands) == 2 or self.data is not None + ): # only when two hands are detected, we update the data + with self.data_lock: + self.data: Dict[str, np.ndarray] = get_raw_data(event.hands, self.data) + if self.verbose: + for hand in event.hands: + hand_type = "left" if str(hand.type) == "HandType.Left" else "right" + print( + f"Hand id {hand.id} is a {hand_type} hand with position" + f"({hand.palm.position.x}, {hand.palm.position.y}, {hand.palm.position.z})." + ) + + def get_data(self): + with self.data_lock: + return deepcopy(self.data) + + def reset_status(self): + """Reset the cache of the streamer.""" + with self.data_lock: + self.data = None + + +class FakeLeapMotionListener: + def __init__(self, data_path=None): + self.data_lock = threading.Lock() + with open(data_path, "rb") as f: + self.data_list = pickle.load(f) + self.data_index = 0 + + def get_data(self) -> Dict[str, np.ndarray]: + with self.data_lock: + data = deepcopy(self.data_list[self.data_index % len(self.data_list)]) + self.data_index += 1 + return data + + +# Note currently it is a auto-polling based streamer. 
+# The connection will start another thread to continue polling data +# and the listener will continue to receive data from the connection. +class LeapMotionStreamer(BaseStreamer): + """LeapMotion streamer that provides hand tracking data.""" + + def __init__(self, verbose=False, record_data=False, **kwargs): + self.connection = leap.Connection() + self.listener = LeapMotionListener(verbose=verbose) + self.connection.add_listener(self.listener) + # self.listener = FakeLeapMotionListener(data_path="leapmotion_data_rot.pkl") + + self.connection.set_tracking_mode(leap.TrackingMode.Desktop) + + self.record_data = record_data + self.data_queue = queue.Queue() + + def reset_status(self): + """Reset the cache of the streamer.""" + self.listener.reset_status() + while not self.listener.get_data(): + time.sleep(0.1) + + def start_streaming(self): + self.connection.connect() + print("Waiting for the first data...") + time.sleep(0.5) + while not self.listener.get_data(): + time.sleep(0.1) + print("First data received!") + + def stop_streaming(self): + self.connection.disconnect() + + def get(self) -> StreamerOutput: + """Return hand tracking data as StreamerOutput.""" + # Get raw data and save if recording + raw_data = self.listener.get_data() + if self.record_data: + self.data_queue.put(raw_data) + + # Process raw data into transformations + processed_data = process_data(raw_data) + + # Initialize IK data (ik_keys) - LeapMotion provides hand/finger tracking + ik_data = {} + for hand_type in ["left", "right"]: + # Add wrist poses and finger positions to IK data + ik_data[f"{hand_type}_wrist"] = processed_data[f"{hand_type}_wrist"] + ik_data[f"{hand_type}_fingers"] = processed_data[f"{hand_type}_fingers"] + + # Return structured output - LeapMotion only provides IK data + return StreamerOutput( + ik_data=ik_data, + control_data={}, # No control commands from LeapMotion + teleop_data={}, # No teleop commands from LeapMotion + source="leapmotion", + ) + + def 
dump_data_to_file(self): + """Save recorded data to file if recording was enabled.""" + if not self.record_data: + return + + data_list = [] + while not self.data_queue.empty(): + data_list.append(self.data_queue.get()) + with open("leapmotion_data_trans.pkl", "wb") as f: + pickle.dump(data_list, f) + + +if __name__ == "__main__": + streamer = LeapMotionStreamer(verbose=True) + streamer.start_streaming() + for _ in range(100): + streamer.get() + time.sleep(0.1) + streamer.stop_streaming() + streamer.dump_data_to_file() diff --git a/decoupled_wbc/control/teleop/streamers/manus_streamer.py b/decoupled_wbc/control/teleop/streamers/manus_streamer.py new file mode 100644 index 0000000..1756779 --- /dev/null +++ b/decoupled_wbc/control/teleop/streamers/manus_streamer.py @@ -0,0 +1,90 @@ +import pickle +import threading + +import zmq + +from decoupled_wbc.control.teleop.device.manus import Manus +from decoupled_wbc.control.teleop.streamers.base_streamer import BaseStreamer, StreamerOutput + + +class ManusStreamer(BaseStreamer): + def __init__(self, port=5556): + self.port = port + self.context = None + self.socket = None + self.manus_server = None + self.server_thread = None + + def request_data(self): + """Request the latest data from the server.""" + if self.socket is None: + raise RuntimeError("ManusStreamer not started. 
Call start_streaming() first.") + + # Send request to the server + self.socket.send(b"request_data") # Send a request message + + # Wait for the server's response + message = self.socket.recv() # Receive response + data = pickle.loads(message) # Deserialize the data + + return data + + def _run_server(self): + """Run the manus server in a separate thread.""" + with self.manus_server.activate(): + print("Manus server activated") + self.manus_server.run() + + def start_streaming(self): + """Start the manus server and establish connection.""" + if self.manus_server is not None: + return + + print(f"Starting manus server on port {self.port}...") + + # Create manus server instance + self.manus_server = Manus(port=self.port) + + # Start server in separate thread + self.server_thread = threading.Thread(target=self._run_server, daemon=True) + self.server_thread.start() + + # Establish ZMQ connection + self.context = zmq.Context() + self.socket = self.context.socket(zmq.REQ) + self.socket.connect(f"tcp://localhost:{self.port}") + + def get(self): + """Return hand tracking data as StreamerOutput.""" + raw_data = self.request_data() + + # Initialize IK data (ik_keys) - Manus provides hand/finger tracking + ik_data = {} + if isinstance(raw_data, dict): + # Extract finger and hand pose data + for key, value in raw_data.items(): + if "finger" in key.lower() or "hand" in key.lower(): + ik_data[key] = value + + # Return structured output - Manus only provides IK data + return StreamerOutput( + ik_data=ik_data, + control_data={}, # No control commands from Manus + teleop_data={}, # No teleop commands from Manus + source="manus", + ) + + def stop_streaming(self): + """Stop the manus server and close connections.""" + # Close ZMQ connection + if self.socket: + self.socket.close() + self.socket = None + + if self.context: + self.context.term() + self.context = None + + # Reset server references + self.manus_server = None + self.server_thread = None diff --git 
"""PicoStreamer: polls a Pico XR headset/controllers via XrClient and maps
poses, buttons, and joysticks into the unified StreamerOutput format."""

import subprocess
import time

import numpy as np
from scipy.spatial.transform import Rotation as R

from decoupled_wbc.control.teleop.device.pico.xr_client import XrClient
from decoupled_wbc.control.teleop.streamers.base_streamer import BaseStreamer, StreamerOutput

# Fixed rotation from the headset's y-up frame to the robot's z-up world frame.
R_HEADSET_TO_WORLD = np.array(
    [
        [0, 0, -1],
        [-1, 0, 0],
        [0, 1, 0],
    ]
)


class PicoStreamer(BaseStreamer):
    """Streams teleop commands (wrist poses, fingers, navigation, toggles)
    from a Pico XR headset and its controllers."""

    def __init__(self):
        self.xr_client = XrClient()
        self.run_pico_service()

        self.reset_status()

    def run_pico_service(self):
        """Launch the on-device Pico service in the background.

        NOTE: despite its name, ``pico_service_pid`` stores the Popen handle,
        not a bare pid.
        """
        self.pico_service_pid = subprocess.Popen(
            ["bash", "/opt/apps/roboticsservice/runService.sh"]
        )
        print(f"Pico service running with pid {self.pico_service_pid.pid}")

    def stop_pico_service(self):
        """Terminate the Pico service started by ``run_pico_service``."""
        proc = getattr(self, "pico_service_pid", None)
        if proc:
            # Fix: kill and reap the child directly. The original spawned a
            # separate `kill -9 <pid>` process and never waited on either,
            # leaking zombie processes.
            proc.kill()
            proc.wait()
            print(f"Pico service killed with pid {proc.pid}")
        else:
            print("Pico service not running")

    def reset_status(self):
        """Reset the commanded base height and all toggle edge-detector latches."""
        self.current_base_height = 0.74  # Initial base height, 0.74m (standing height)
        self.toggle_policy_action_last = False
        self.toggle_activation_last = False
        self.toggle_data_collection_last = False
        self.toggle_data_abort_last = False

    def start_streaming(self):
        # XrClient starts delivering data on construction; nothing to do here.
        pass

    def stop_streaming(self):
        self.xr_client.close()

    def get(self) -> StreamerOutput:
        """Poll the device once and return a unified StreamerOutput sample."""
        pico_data = self._get_pico_data()
        return self._generate_unified_raw_data(pico_data)

    def __del__(self):
        # Cleanup is explicit via stop_streaming() / stop_pico_service().
        pass

    def _get_pico_data(self):
        """Read one snapshot of poses, key values, buttons, joysticks, and
        tracking data from the XR client."""
        pico_data = {}

        # Poses of both controllers and the headset.
        pico_data["left_pose"] = self.xr_client.get_pose_by_name("left_controller")
        pico_data["right_pose"] = self.xr_client.get_pose_by_name("right_controller")
        pico_data["head_pose"] = self.xr_client.get_pose_by_name("headset")

        # Analog trigger/grip values of both controllers.
        pico_data["left_trigger"] = self.xr_client.get_key_value_by_name("left_trigger")
        pico_data["right_trigger"] = self.xr_client.get_key_value_by_name("right_trigger")
        pico_data["left_grip"] = self.xr_client.get_key_value_by_name("left_grip")
        pico_data["right_grip"] = self.xr_client.get_key_value_by_name("right_grip")

        # Button states of both controllers.
        pico_data["A"] = self.xr_client.get_button_state_by_name("A")
        pico_data["B"] = self.xr_client.get_button_state_by_name("B")
        pico_data["X"] = self.xr_client.get_button_state_by_name("X")
        pico_data["Y"] = self.xr_client.get_button_state_by_name("Y")
        pico_data["left_menu_button"] = self.xr_client.get_button_state_by_name("left_menu_button")
        pico_data["right_menu_button"] = self.xr_client.get_button_state_by_name(
            "right_menu_button"
        )
        pico_data["left_axis_click"] = self.xr_client.get_button_state_by_name("left_axis_click")
        pico_data["right_axis_click"] = self.xr_client.get_button_state_by_name("right_axis_click")

        # Device timestamp (nanoseconds).
        pico_data["timestamp"] = self.xr_client.get_timestamp_ns()

        # Hand tracking state for both hands.
        pico_data["left_hand_tracking_state"] = self.xr_client.get_hand_tracking_state("left")
        pico_data["right_hand_tracking_state"] = self.xr_client.get_hand_tracking_state("right")

        # Joystick axes for both controllers.
        pico_data["left_joystick"] = self.xr_client.get_joystick_state("left")
        pico_data["right_joystick"] = self.xr_client.get_joystick_state("right")

        # Motion tracker and body tracking data.
        pico_data["motion_tracker_data"] = self.xr_client.get_motion_tracker_data()
        pico_data["body_tracking_data"] = self.xr_client.get_body_tracking_data()

        return pico_data

    def _generate_unified_raw_data(self, pico_data):
        """Convert a raw device snapshot into a StreamerOutput."""
        # Controller poses in the z-up world frame, headset-yaw compensated.
        left_controller_T = self._process_xr_pose(pico_data["left_pose"], pico_data["head_pose"])
        right_controller_T = self._process_xr_pose(pico_data["right_pose"], pico_data["head_pose"])

        # Navigation commands derived from the joysticks.
        DEAD_ZONE = 0.1
        MAX_LINEAR_VEL = 0.5  # m/s
        MAX_ANGULAR_VEL = 1.0  # rad/s

        fwd_bwd_input = pico_data["left_joystick"][1]
        strafe_input = -pico_data["left_joystick"][0]
        yaw_input = -pico_data["right_joystick"][0]

        lin_vel_x = self._apply_dead_zone(fwd_bwd_input, DEAD_ZONE) * MAX_LINEAR_VEL
        lin_vel_y = self._apply_dead_zone(strafe_input, DEAD_ZONE) * MAX_LINEAR_VEL
        ang_vel_z = self._apply_dead_zone(yaw_input, DEAD_ZONE) * MAX_ANGULAR_VEL

        # Base height command: Y raises, X lowers, clamped to [0.2, 0.74] m.
        height_increment = 0.01  # Small step per call when button is pressed
        if pico_data["Y"]:
            self.current_base_height += height_increment
        elif pico_data["X"]:
            self.current_base_height -= height_increment
        self.current_base_height = np.clip(self.current_base_height, 0.2, 0.74)

        # Gripper (finger) commands.
        left_fingers = self._generate_finger_data(pico_data, "left")
        right_fingers = self._generate_finger_data(pico_data, "right")

        # Activation commands: left menu button chorded with either trigger.
        toggle_policy_action_tmp = pico_data["left_menu_button"] and (
            pico_data["left_trigger"] > 0.5
        )
        toggle_activation_tmp = pico_data["left_menu_button"] and (pico_data["right_trigger"] > 0.5)

        # Edge detection: emit the new state only when the chord changes.
        if self.toggle_policy_action_last != toggle_policy_action_tmp:
            toggle_policy_action = toggle_policy_action_tmp
        else:
            toggle_policy_action = False
        self.toggle_policy_action_last = toggle_policy_action_tmp

        if self.toggle_activation_last != toggle_activation_tmp:
            toggle_activation = toggle_activation_tmp
        else:
            toggle_activation = False
        self.toggle_activation_last = toggle_activation_tmp

        # Data collection commands (edge-detected A/B buttons).
        toggle_data_collection_tmp = pico_data["A"]
        toggle_data_abort_tmp = pico_data["B"]

        if self.toggle_data_collection_last != toggle_data_collection_tmp:
            toggle_data_collection = toggle_data_collection_tmp
        else:
            toggle_data_collection = False
        self.toggle_data_collection_last = toggle_data_collection_tmp

        if self.toggle_data_abort_last != toggle_data_abort_tmp:
            toggle_data_abort = toggle_data_abort_tmp
        else:
            toggle_data_abort = False
        self.toggle_data_abort_last = toggle_data_abort_tmp

        return StreamerOutput(
            ik_data={
                "left_wrist": left_controller_T,
                "right_wrist": right_controller_T,
                "left_fingers": {"position": left_fingers},
                "right_fingers": {"position": right_fingers},
            },
            control_data={
                "base_height_command": self.current_base_height,
                "navigate_cmd": [lin_vel_x, lin_vel_y, ang_vel_z],
                "toggle_policy_action": toggle_policy_action,
            },
            teleop_data={
                "toggle_activation": toggle_activation,
            },
            data_collection_data={
                "toggle_data_collection": toggle_data_collection,
                "toggle_data_abort": toggle_data_abort,
            },
            source="pico",
        )

    def _process_xr_pose(self, controller_pose, headset_pose):
        """Return a 4x4 transform of the controller relative to the headset
        position, yaw-compensated, expressed in the z-up world frame.

        Both poses are 7-vectors (x, y, z, qx, qy, qz, qw) in the headset's
        y-up frame.
        """
        xr_pose_xyz = np.array(controller_pose)[:3]  # x, y, z
        xr_pose_quat = np.array(controller_pose)[3:]  # x, y, z, w

        # Handle all-zero quaternion case by using identity quaternion
        if np.allclose(xr_pose_quat, 0):
            xr_pose_quat = np.array([0, 0, 0, 1])  # identity quaternion: x, y, z, w

        # Convert from y up to z up
        xr_pose_xyz = R_HEADSET_TO_WORLD @ xr_pose_xyz
        xr_pose_rotation = R.from_quat(xr_pose_quat).as_matrix()
        xr_pose_rotation = R_HEADSET_TO_WORLD @ xr_pose_rotation @ R_HEADSET_TO_WORLD.T

        # Convert headset pose to x, y, z, w quaternion
        headset_pose_xyz = np.array(headset_pose)[:3]
        headset_pose_quat = np.array(headset_pose)[3:]

        if np.allclose(headset_pose_quat, 0):
            headset_pose_quat = np.array([0, 0, 0, 1])  # identity quaternion: x, y, z, w

        # Convert from y up to z up
        headset_pose_xyz = R_HEADSET_TO_WORLD @ headset_pose_xyz
        headset_pose_rotation = R.from_quat(headset_pose_quat).as_matrix()
        headset_pose_rotation = R_HEADSET_TO_WORLD @ headset_pose_rotation @ R_HEADSET_TO_WORLD.T

        # Controller position relative to the headset.
        xr_pose_xyz_delta = xr_pose_xyz - headset_pose_xyz

        # Remove the headset yaw so commands are body-relative.
        R_headset_to_world = R.from_matrix(headset_pose_rotation)
        headset_pose_yaw = R_headset_to_world.as_euler("xyz")[2]  # Extract yaw (Z-axis rotation)
        inverse_yaw_rotation = R.from_euler("z", -headset_pose_yaw).as_matrix()

        # Align with headset yaw to controller position delta and rotation
        xr_pose_xyz_delta_compensated = inverse_yaw_rotation @ xr_pose_xyz_delta
        xr_pose_rotation_compensated = inverse_yaw_rotation @ xr_pose_rotation

        xr_pose_T = np.eye(4)
        xr_pose_T[:3, :3] = xr_pose_rotation_compensated
        xr_pose_T[:3, 3] = xr_pose_xyz_delta_compensated
        return xr_pose_T

    def _apply_dead_zone(self, value, dead_zone):
        """Apply dead zone and renormalize the remainder to [-1, 1]."""
        if abs(value) < dead_zone:
            return 0.0
        sign = 1 if value > 0 else -1
        # Normalize the output to be between -1 and 1 after dead zone
        return sign * (abs(value) - dead_zone) / (1.0 - dead_zone)

    def _generate_finger_data(self, pico_data, hand):
        """Generate (25, 4, 4) finger position data for ``hand`` from the
        trigger/grip chords. Finger closing is suppressed while the left menu
        button is held, since that chord is reserved for activation toggles."""
        fingertips = np.zeros([25, 4, 4])

        thumb = 0
        index = 5
        middle = 10
        ring = 15

        # Thumb tip (offset 4 within each finger's 5 joints) is always open.
        # (Fixed comment: the gate below is the left *menu* button, not a
        # shoulder button.)
        fingertips[4 + thumb, 0, 3] = 1.0  # open thumb
        if not pico_data["left_menu_button"]:
            if pico_data[f"{hand}_trigger"] > 0.5 and not pico_data[f"{hand}_grip"] > 0.5:
                fingertips[4 + index, 0, 3] = 1.0  # close index
            elif pico_data[f"{hand}_trigger"] > 0.5 and pico_data[f"{hand}_grip"] > 0.5:
                fingertips[4 + middle, 0, 3] = 1.0  # close middle
            elif not pico_data[f"{hand}_trigger"] > 0.5 and pico_data[f"{hand}_grip"] > 0.5:
                fingertips[4 + ring, 0, 3] = 1.0  # close ring

        return fingertips


if __name__ == "__main__":
    # from decoupled_wbc.control.utils.debugger import wait_for_debugger
    # wait_for_debugger()

    streamer = PicoStreamer()
    streamer.start_streaming()
    while True:
        raw_data = streamer.get()
        print(
            f"left_wrist: {raw_data.ik_data['left_wrist']}, right_wrist: {raw_data.ik_data['right_wrist']}"
        )
        time.sleep(0.1)
+ """ + + def __init__( + self, + robot_model: RobotModel, + left_hand_ik_solver: Solver, + right_hand_ik_solver: Solver, + enable_visualization=False, + body_active_joint_groups: Optional[List[str]] = None, + body_ik_solver_settings_type: str = "default", + ): + # initialize the body + if body_active_joint_groups is not None: + self.body = ReducedRobotModel.from_active_groups(robot_model, body_active_joint_groups) + self.full_robot = self.body.full_robot + self.using_reduced_robot_model = True + else: + self.body = robot_model + self.full_robot = self.body + self.using_reduced_robot_model = False + if body_ik_solver_settings_type == "default": + body_ik_solver_settings = BodyIKSolverSettings() + else: + raise ValueError( + f"Unknown body_ik_solver_settings_type: {body_ik_solver_settings_type}" + ) + self.body_ik_solver = BodyIKSolver(body_ik_solver_settings) + + # We register the specific robot model to the robot-agnostic body IK solver class + self.body_ik_solver.register_robot(self.body) + + # Hand IK solvers are hand specific, so we pass them in the constructor + self.left_hand_ik_solver = left_hand_ik_solver + self.right_hand_ik_solver = right_hand_ik_solver + + # enable visualizer + self.enable_visualization = enable_visualization + if self.enable_visualization: + self.visualizer = RobotVisualizer(self.full_robot) + self.visualizer.visualize(self.full_robot.q_zero) + time.sleep(1) # wait for the visualizer to start + + self.in_warmup = True + self._most_recent_ik_data = None + self._most_recent_q = self.full_robot.default_body_pose.copy() + + def compute_joint_positions( + self, body_data: dict, left_hand_data: dict, right_hand_data: dict + ) -> np.ndarray: + """Process only IK-related data, return joint positions""" + if self.in_warmup: + # TODO: Warmup is not necessary if we start IK from the current robot qpos, rather than the zero qpos + for _ in range(50): + target_robot_joints = self._inverse_kinematics( + body_data, left_hand_data, right_hand_data + ) 
+ self.in_warmup = False + else: + target_robot_joints = self._inverse_kinematics( + body_data, left_hand_data, right_hand_data + ) + + return target_robot_joints + + def _inverse_kinematics( + self, + body_target_pose, + left_hand_target_pose, + right_hand_target_pose, + ): + """ + Solve the inverse kinematics problem for the given target poses. + Args: + body_target_pose: Dictionary of link names and their corresponding target pose. + left_hand_target_pose: Dictionary with key "position" mapping to a (25, 4, 4) np.ndarray from AVP data + right_hand_target_pose: Dictionary with key "position" mapping to a (25, 4, 4) np.ndarray from AVP data + q: Initial configuration vector. + Returns: + Configuration vector that achieves the target poses. + """ + if body_target_pose: + if self.using_reduced_robot_model: + body_q = self.body.reduced_to_full_configuration( + self.body_ik_solver(body_target_pose) + ) + else: + body_q = self.body_ik_solver(body_target_pose) + else: + # If no body target pose is provided, set the body to the default pose + body_q = self.full_robot.default_body_pose.copy() + + if left_hand_target_pose is not None: + left_hand_actuated_q = self.left_hand_ik_solver(left_hand_target_pose) + body_q[self.full_robot.get_hand_actuated_joint_indices(side="left")] = ( + left_hand_actuated_q + ) + + if right_hand_target_pose is not None: + right_hand_actuated_q = self.right_hand_ik_solver(right_hand_target_pose) + body_q[self.full_robot.get_hand_actuated_joint_indices(side="right")] = ( + right_hand_actuated_q + ) + + if self.enable_visualization: + self.visualizer.visualize(np.array(body_q)) + + return body_q + + def reset(self): + """Reset the robot model and IK solvers to the initial state, and re-activate the warmup procedure.""" + self.body.reset_forward_kinematics() # self.body is the same one as self.body_ik_solver.robot + self.full_robot.reset_forward_kinematics() + self.body_ik_solver.initialize() + # If in the future, the hand IK solver has initialize 
"""TeleopStreamer: owns the body/hand input streamers (or a replay file) and
feeds calibrated, pre-processed IK data to the retargeting pipeline."""

from math import floor
import pickle
from typing import Optional

from decoupled_wbc.control.robot_model.robot_model import RobotModel
from decoupled_wbc.control.teleop.pre_processor.fingers.fingers import FingersPreProcessor
from decoupled_wbc.control.teleop.pre_processor.wrists.wrists import WristsPreProcessor
from decoupled_wbc.control.teleop.streamers.base_streamer import StreamerOutput


class TeleopStreamer:
    """Aggregates one body streamer and (optionally) one hand streamer, or
    replays recorded data, and applies calibration pre-processing."""

    def __init__(
        self,
        robot_model: RobotModel,
        body_control_device: Optional[str] = None,
        hand_control_device: Optional[str] = None,
        enable_real_device=True,
        body_streamer_ip="",
        body_streamer_keyword="",
        replay_data_path: Optional[str] = None,
        replay_speed: float = 1.0,
    ):
        """
        Args:
            robot_model: Robot model used by the wrist pre-processor.
            body_control_device: One of "vive", "iphone", "leapmotion",
                "joycon", "pico", "dummy", or None.
            hand_control_device: One of "manus", "joycon", "iphone", "pico",
                or None. If equal to ``body_control_device``, the body
                streamer serves both roles.
            enable_real_device: If False, no hardware is opened; replay data
                may be used instead.
            body_streamer_ip / body_streamer_keyword: Vive-only connection
                parameters.
            replay_data_path: Pickle file with recorded teleop data (used
                only when ``enable_real_device`` is False).
            replay_speed: Playback rate multiplier for replay data.
        """
        self.body = robot_model

        self.body_control_device = body_control_device
        self.hand_control_device = hand_control_device
        self.body_streamer_ip = body_streamer_ip
        self.body_streamer_keyword = body_streamer_keyword
        self.replay_speed = replay_speed

        # enable real robot and devices
        self.enable_real_device = enable_real_device
        if self.enable_real_device:
            self.body_streamer = self._create_body_streamer(body_control_device)
            # Only create a separate hand streamer when the hand device
            # differs from the body device (otherwise the body streamer
            # provides hand data too).
            if hand_control_device and hand_control_device != body_control_device:
                self.hand_streamer = self._create_hand_streamer(hand_control_device)
            else:
                self.hand_streamer = None
        else:
            self.body_streamer = None
            self.hand_streamer = None

        self.raw_replay_data = None
        self.replay_calibration_data = None
        self.replay_mode = False
        if replay_data_path and not self.enable_real_device:
            with open(replay_data_path, "rb") as f:
                data_ = pickle.load(f)
                self.raw_replay_data = data_["replay_data"]
                self.replay_calibration_data = data_["calibration_data"]
                print("Found teleop replay data in file: ", replay_data_path)
            self.replay_idx = 0
            self.replay_mode = True

        # initialize pre_processors
        # (Fix: the original re-assigned self.body_control_device and
        # self.hand_control_device here redundantly; they are set once above.)
        if body_control_device or self.replay_mode:
            self.body_pre_processor = WristsPreProcessor(
                motion_scale=robot_model.supplemental_info.teleop_upper_body_motion_scale
            )
            self.body_pre_processor.register(self.body)
        else:
            self.body_pre_processor = None

        # initialize hand pre-processors and post-processors
        if hand_control_device or self.replay_mode:
            self.left_hand_pre_processor = FingersPreProcessor(side="left")
            self.right_hand_pre_processor = FingersPreProcessor(side="right")
        else:
            self.left_hand_pre_processor = None
            self.right_hand_pre_processor = None

        self.is_calibrated = False

    def _create_body_streamer(self, device):
        """Instantiate and start the body streamer for ``device`` (or None).

        Imports are deliberately local so only the selected device's
        dependencies are required at runtime.
        """
        if device == "vive":
            from decoupled_wbc.control.teleop.streamers.vive_streamer import ViveStreamer

            streamer = ViveStreamer(
                ip=self.body_streamer_ip, keyword=self.body_streamer_keyword
            )
        elif device == "iphone":
            from decoupled_wbc.control.teleop.streamers.iphone_streamer import IphoneStreamer

            streamer = IphoneStreamer()
        elif device == "leapmotion":
            from decoupled_wbc.control.teleop.streamers.leapmotion_streamer import (
                LeapMotionStreamer,
            )

            streamer = LeapMotionStreamer()
        elif device == "joycon":
            from decoupled_wbc.control.teleop.streamers.joycon_streamer import JoyconStreamer

            streamer = JoyconStreamer()
        elif device == "pico":
            from decoupled_wbc.control.teleop.streamers.pico_streamer import PicoStreamer

            streamer = PicoStreamer()
        elif device == "dummy":
            from decoupled_wbc.control.teleop.streamers.dummy_streamer import DummyStreamer

            streamer = DummyStreamer()
        else:
            return None
        streamer.start_streaming()
        return streamer

    def _create_hand_streamer(self, device):
        """Instantiate and start a dedicated hand streamer (or None)."""
        if device == "manus":
            from decoupled_wbc.control.teleop.streamers.manus_streamer import ManusStreamer

            streamer = ManusStreamer()
        elif device == "joycon":
            from decoupled_wbc.control.teleop.streamers.joycon_streamer import JoyconStreamer

            streamer = JoyconStreamer()
        elif device == "iphone":
            from decoupled_wbc.control.teleop.streamers.iphone_streamer import IphoneStreamer

            streamer = IphoneStreamer()
        elif device == "pico":
            from decoupled_wbc.control.teleop.streamers.pico_streamer import PicoStreamer

            streamer = PicoStreamer()
        else:
            return None
        streamer.start_streaming()
        return streamer

    def _get_replay_data(self) -> StreamerOutput:
        """Return the next frame of recorded IK data, honoring replay_speed."""
        streamer_data = StreamerOutput()

        if self.raw_replay_data:
            # Map the monotonically increasing tick onto a recorded frame.
            # Fix: bound-check the *scaled* frame index. The original checked
            # replay_idx itself, which raised IndexError when
            # replay_speed < 1 and silently truncated playback when > 1.
            frame = floor(self.replay_idx / self.replay_speed)
            if frame < len(self.raw_replay_data):
                streamer_data.ik_data.update(self.raw_replay_data[frame])
                self.replay_idx += 1

        return streamer_data

    def _get_live_data(self) -> StreamerOutput:
        """Get structured data instead of raw dict"""
        if self.body_streamer:
            streamer_data = self.body_streamer.get()
        else:
            streamer_data = StreamerOutput()

        if self.hand_streamer and self.hand_streamer != self.body_streamer:
            hand_data = self.hand_streamer.get()

            # Merge hand data into body data (hand data takes precedence)
            streamer_data.ik_data.update(hand_data.ik_data)
            streamer_data.control_data.update(hand_data.control_data)
            streamer_data.teleop_data.update(hand_data.teleop_data)
            streamer_data.data_collection_data.update(hand_data.data_collection_data)

        return streamer_data

    def get_streamer_data(self) -> StreamerOutput:
        """Return the current (live or replayed) sample; once calibrated, the
        ik_data field is replaced by pre-processed body/hand targets."""
        if self.enable_real_device:
            streamer_data = self._get_live_data()
        elif self.replay_mode:
            streamer_data = self._get_replay_data()
        else:
            streamer_data = StreamerOutput()

        if self.is_calibrated and streamer_data.ik_data:
            body_data, left_hand_data, right_hand_data = self.pre_process(streamer_data.ik_data)
            streamer_data.ik_data = {
                "body_data": body_data,
                "left_hand_data": left_hand_data,
                "right_hand_data": right_hand_data,
            }
        elif not self.is_calibrated:
            # Suppress IK data until calibration has been performed.
            streamer_data.ik_data = {}

        return streamer_data

    def calibrate(self):
        """Calibrate the pre-processors using only IK data."""
        if self.replay_mode:
            ik_data = self.replay_calibration_data
        else:
            streamer_data = self._get_live_data()
            ik_data = streamer_data.ik_data

        if self.body_pre_processor:
            self.body_pre_processor.calibrate(ik_data, self.body_control_device)
        if self.left_hand_pre_processor:
            self.left_hand_pre_processor.calibrate(ik_data, self.hand_control_device)
        if self.right_hand_pre_processor:
            self.right_hand_pre_processor.calibrate(ik_data, self.hand_control_device)

        self.is_calibrated = True

    def pre_process(self, raw_data):
        """Pre-process the raw data into (body, left_hand, right_hand) targets."""
        assert (
            self.body_pre_processor or self.left_hand_pre_processor or self.right_hand_pre_processor
        ), "Pre-processors are not initialized."

        # Check if finger data is present in raw_data
        has_finger_data = "left_fingers" in raw_data and "right_fingers" in raw_data

        if self.body_pre_processor:
            body_data = self.body_pre_processor(raw_data)
            # Only process hand data if finger keys are present and preprocessors are available
            if has_finger_data and self.left_hand_pre_processor and self.right_hand_pre_processor:
                left_hand_data = self.left_hand_pre_processor(raw_data)
                right_hand_data = self.right_hand_pre_processor(raw_data)
                return body_data, left_hand_data, right_hand_data
            else:
                return body_data, None, None
        else:  # only hands
            if has_finger_data and self.left_hand_pre_processor and self.right_hand_pre_processor:
                left_hand_data = self.left_hand_pre_processor(raw_data)
                right_hand_data = self.right_hand_pre_processor(raw_data)
                return None, left_hand_data, right_hand_data
            else:
                # No finger data available, return None for hand data
                return None, None, None

    def reset(self):
        """Reset streamer toggle/latch state on all owned streamers."""
        if self.body_streamer is not None:
            self.body_streamer.reset_status()
        if self.hand_streamer is not None:
            self.hand_streamer.reset_status()

    def stop_streaming(self):
        """Stop all owned streamers."""
        if self.body_streamer:
            self.body_streamer.stop_streaming()
        # Only stop hand_streamer if it's a different instance than body_streamer
        if self.hand_streamer and self.hand_streamer is not self.body_streamer:
            self.hand_streamer.stop_streaming()
"""Keyboard input plumbing: local sshkeyboard listener, ROS-topic dispatcher,
publisher/subscriber bridges, and an emergency-stop handler."""

import os
import subprocess
import sys
import threading

import rclpy
from sshkeyboard import listen_keyboard, stop_listening
from std_msgs.msg import String as RosStringMsg

from decoupled_wbc.control.main.constants import KEYBOARD_INPUT_TOPIC

# Global variable to store original terminal attributes
_original_terminal_attrs = None


def save_terminal_state():
    """Save the current terminal state so it can be restored later."""
    global _original_terminal_attrs
    # Fix: the original caught ImportError together with termios.error in one
    # tuple; if `import termios` itself failed, evaluating `termios.error` in
    # the except clause raised NameError. Import failure is handled first.
    try:
        import termios
    except ImportError:
        _original_terminal_attrs = None
        return
    try:
        fd = sys.stdin.fileno()
        _original_terminal_attrs = termios.tcgetattr(fd)
    except (OSError, termios.error):
        _original_terminal_attrs = None


def restore_terminal():
    """Restore terminal to original state."""
    global _original_terminal_attrs
    try:
        import termios
    except ImportError:
        termios = None
    if termios is not None and _original_terminal_attrs is not None:
        try:
            fd = sys.stdin.fileno()
            termios.tcsetattr(fd, termios.TCSANOW, _original_terminal_attrs)
            return
        except (OSError, termios.error):
            pass

    # Fallback for non-Unix systems or if termios fails
    try:
        if os.name == "posix":
            os.system("stty sane")
    except OSError:
        pass


class ROSKeyboardDispatcher:
    """ROS-based keyboard dispatcher that receives keyboard events via ROS topics."""

    def __init__(self):
        self.listeners = []
        self._active = False
        assert rclpy.ok(), "Expected ROS2 to be initialized in this process..."
        executor = rclpy.get_global_executor()
        self.node = executor.get_nodes()[0]
        print("creating keyboard input subscriber...")
        self.subscription = self.node.create_subscription(
            RosStringMsg, KEYBOARD_INPUT_TOPIC, self._callback, 10
        )

    def register(self, listener):
        """Register an object implementing handle_keyboard_button(key)."""
        if not hasattr(listener, "handle_keyboard_button"):
            raise NotImplementedError("handle_keyboard_button is not implemented")
        self.listeners.append(listener)

    def start(self):
        """Start the ROS keyboard dispatcher."""
        self._active = True
        print("ROS keyboard dispatcher started")

    def stop(self):
        """Stop the ROS keyboard dispatcher and cleanup."""
        if self._active:
            self._active = False
            # Clean up subscription
            if hasattr(self, "subscription"):
                self.node.destroy_subscription(self.subscription)
            print("ROS keyboard dispatcher stopped")

    def _callback(self, msg: RosStringMsg):
        # Fan each received key out to all registered listeners.
        if self._active:
            for listener in self.listeners:
                listener.handle_keyboard_button(msg.data)

    def __del__(self):
        """Cleanup when object is destroyed."""
        self.stop()


class KeyboardDispatcher:
    """Local keyboard dispatcher backed by sshkeyboard in a daemon thread."""

    def __init__(self):
        self.listeners = []
        self._listening_thread = None
        self._stop_event = threading.Event()
        # (Removed the unused `self._key` attribute from the original.)

    def register(self, listener):
        # raise if handle_keyboard_button is not implemented
        # TODO(YL): let listener be a Callable instead of a class
        if not hasattr(listener, "handle_keyboard_button"):
            raise NotImplementedError("handle_keyboard_button is not implemented")
        self.listeners.append(listener)

    def handle_key(self, key):
        """sshkeyboard on_press callback: fan out or shut down."""
        # Check if we should stop
        if self._stop_event.is_set():
            stop_listening()
            return

        for listener in self.listeners:
            listener.handle_keyboard_button(key)

    def start_listening(self):
        """Blocking listen loop; runs in the dispatcher thread."""
        try:
            save_terminal_state()  # Save original terminal state before listening
            listen_keyboard(
                on_press=self.handle_key,
                delay_second_char=0.1,
                delay_other_chars=0.05,
                sleep=0.01,
            )
        except Exception as e:
            print(f"Keyboard listener stopped: {e}")
        finally:
            # Ensure terminal is restored even if an exception occurs
            self._restore_terminal()

    def start(self):
        """Start listening in a background daemon thread."""
        self._listening_thread = threading.Thread(target=self.start_listening, daemon=True)
        self._listening_thread.start()

    def stop(self):
        """Stop the keyboard listener and restore terminal settings."""
        if self._listening_thread and self._listening_thread.is_alive():
            self._stop_event.set()
            # Force stop_listening to be called
            try:
                stop_listening()
            except Exception:
                pass
            # Wait a bit for the thread to finish
            self._listening_thread.join(timeout=0.5)
        # Restore terminal settings
        self._restore_terminal()

    def _restore_terminal(self):
        """Restore terminal to a sane state."""
        restore_terminal()

    def __del__(self):
        """Cleanup when object is destroyed."""
        self.stop()


KEYBOARD_LISTENER_TOPIC_NAME = "/Gr00tKeyboardListener"


class KeyboardListener:
    """Buffers the most recent key press for polling consumers."""

    def __init__(self):
        self.key = None

    def handle_keyboard_button(self, key):
        self.key = key

    def pop_key(self):
        """Return and clear the buffered key (None if nothing was pressed)."""
        key = self.key
        self.key = None
        return key


class KeyboardListenerPublisher:
    """Publishes received key presses on a ROS topic."""

    def __init__(self, topic_name: str = KEYBOARD_LISTENER_TOPIC_NAME):
        """
        Args:
            topic_name: ROS topic to publish key presses on.

        (Fixed docstring: the original documented `remote_system` and
        `control_channel_name` parameters that do not exist.)
        """
        assert rclpy.ok(), "Expected ROS2 to be initialized in this process..."
        executor = rclpy.get_global_executor()
        self.node = executor.get_nodes()[0]
        self.publisher = self.node.create_publisher(RosStringMsg, topic_name, 1)

    def handle_keyboard_button(self, key):
        self.publisher.publish(RosStringMsg(data=key))


class KeyboardListenerSubscriber:
    """Receives key presses from a ROS topic and buffers the latest one."""

    def __init__(
        self,
        topic_name: str = KEYBOARD_LISTENER_TOPIC_NAME,
        node_name: str = "keyboard_listener_subscriber",
    ):
        assert rclpy.ok(), "Expected ROS2 to be initialized in this process..."
        executor = rclpy.get_global_executor()
        nodes = executor.get_nodes()
        if nodes:
            self.node = nodes[0]
            self._create_node = False
        else:
            # Fix: the original ignored `node_name` and hard-coded
            # "KeyboardListenerSubscriber" here.
            self.node = rclpy.create_node(node_name)
            executor.add_node(self.node)
            self._create_node = True
        self.subscriber = self.node.create_subscription(RosStringMsg, topic_name, self._callback, 1)
        self._data = None

    def _callback(self, msg: RosStringMsg):
        self._data = msg.data

    def read_msg(self):
        """Return and clear the most recent key (None if nothing arrived)."""
        data = self._data
        self._data = None
        return data


class KeyboardEStop:
    """Emergency stop: the backtick key kills the deployment tmux session."""

    def __init__(self):
        """Initialize KeyboardEStop with automatic tmux cleanup detection."""
        # Automatically create tmux cleanup if in deployment mode
        self.cleanup_callback = self._create_tmux_cleanup_callback()

    def _create_tmux_cleanup_callback(self):
        """Create a cleanup callback that kills the tmux session if running in deployment mode."""
        tmux_session = os.environ.get("DECOUPLED_WBC_TMUX_SESSION")

        def cleanup_callback():
            if tmux_session:
                print(f"Emergency stop: Killing tmux session '{tmux_session}'...")
                try:
                    subprocess.run(["tmux", "kill-session", "-t", tmux_session], timeout=5)
                    print("Tmux session terminated successfully.")
                except subprocess.TimeoutExpired:
                    print("Warning: Tmux session termination timed out, forcing kill...")
                    try:
                        # NOTE(review): `-9` is not a documented tmux
                        # kill-session flag — confirm intent; kept as-is since
                        # this path is best-effort.
                        subprocess.run(["tmux", "kill-session", "-t", tmux_session, "-9"])
                    except Exception:
                        pass
                except Exception as e:
                    print(f"Warning: Error during tmux cleanup: {e}")
                    # If tmux cleanup fails, fallback to immediate exit
                restore_terminal()
                os._exit(1)
            else:
                print("Emergency stop: No tmux session, exiting normally...")
                sys.exit(1)

        return cleanup_callback

    def handle_keyboard_button(self, key):
        if key == "`":
            print("Emergency stop triggered - running cleanup...")
            self.cleanup_callback()
"""
Simple logging utilities for teleop evaluation.
"""

from datetime import datetime
import os
from pathlib import Path
from typing import Any, Dict

import numpy as np

import decoupled_wbc


class EvaluationLogger:
    """Simple logger that writes evaluation metrics to a timestamped file."""

    def __init__(self, log_subdir: str = "logs_teleop"):
        """
        Initialize the evaluation logger.

        Args:
            log_subdir: Subdirectory name under project root for logs
        """
        self.log_subdir = log_subdir
        self._setup_logging()

    def _setup_logging(self):
        """Setup simple file logging with timestamp-based filename"""
        # Get project root directory
        project_root = Path(os.path.dirname(decoupled_wbc.__file__)).parent

        # Create logs directory if it doesn't exist.
        # Fix: parents=True so a missing intermediate directory does not
        # raise FileNotFoundError (the original only passed exist_ok=True).
        self.logs_dir = project_root / self.log_subdir
        self.logs_dir.mkdir(parents=True, exist_ok=True)

        # Create timestamp-based filename
        timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        log_filename = f"eval_{timestamp}.log"
        self.log_file_path = self.logs_dir / log_filename

        # Open file for writing
        self.log_file = open(self.log_file_path, "w")

    def write(self, message: str):
        """Write a message to the log file."""
        self.log_file.write(message + "\n")
        self.log_file.flush()  # Ensure immediate write

    def log_metrics(self, metrics: Dict[str, Any]):
        """
        Log evaluation metrics to file - just like the original print statements.

        Args:
            metrics: Dictionary of metric names and values
        """
        for metric_name, value in metrics.items():
            if isinstance(value, np.ndarray):
                # Convert numpy arrays to readable format
                if value.size == 1:
                    self.write(f"{metric_name}: {value.item()}")
                else:
                    self.write(f"{metric_name}: {value}")
            else:
                self.write(f"{metric_name}: {value}")

    def get_log_file_path(self) -> Path:
        """Get the path to the current log file."""
        return self.log_file_path

    def print(self):
        """Print out the contents of the log file."""
        if self.log_file_path.exists():
            with open(self.log_file_path, "r") as f:
                content = f.read()
            print(content)
        else:
            print(f"Log file not found: {self.log_file_path}")

    def close(self):
        """Close the log file (idempotent)."""
        # Fix: clear the handle after closing so a second close() is a no-op
        # instead of closing an already-closed file.
        if getattr(self, "log_file", None):
            self.log_file.close()
            self.log_file = None


from typing import SupportsFloat, Tuple

import gymnasium as gym
from gymnasium import spaces

from decoupled_wbc.control.main.constants import DEFAULT_BASE_HEIGHT, DEFAULT_NAV_CMD
from decoupled_wbc.control.main.teleop.configs.configs import SyncSimDataCollectionConfig
from decoupled_wbc.control.policy.wbc_policy_factory import get_wbc_policy
from decoupled_wbc.control.robot_model import RobotModel
from decoupled_wbc.control.robot_model.instantiation import get_robot_type_and_model


class WholeBodyControlWrapper(gym.Wrapper):
    """Gymnasium wrapper to integrate whole-body control for locomotion/manipulation sims."""

    def __init__(self, env, script_config):
        super().__init__(env)
        self.script_config = script_config
        self.script_config["robot"] = env.unwrapped.robot_name
        self.wbc_policy = self.setup_wbc_policy()
        self._action_space = self._wbc_action_space()
@property + def robot_model(self) -> RobotModel: + """Return the robot model from the wrapped environment.""" + return self.env.unwrapped.robot_model # type: ignore + + def reset(self, **kwargs): + obs, info = self.env.reset(**kwargs) + self.wbc_policy = self.setup_wbc_policy() + self.wbc_policy.set_observation(obs) + return obs, info + + def step(self, action: Dict[str, Any]) -> Tuple[Any, SupportsFloat, bool, bool, Dict[str, Any]]: + action_dict = concat_action(self.robot_model, action) + + wbc_goal = {} + for key in ["navigate_cmd", "base_height_command", "target_upper_body_pose"]: + if key in action_dict: + wbc_goal[key] = action_dict[key] + + self.wbc_policy.set_goal(wbc_goal) + wbc_action = self.wbc_policy.get_action() + + result = super().step(wbc_action) + self.wbc_policy.set_observation(result[0]) + return result + + def setup_wbc_policy(self): + robot_type, robot_model = get_robot_type_and_model( + self.script_config["robot"], + enable_waist_ik=self.script_config.get("enable_waist", False), + ) + config = SyncSimDataCollectionConfig.from_dict(self.script_config) + config.update( + { + "save_img_obs": False, + "ik_indicator": False, + "enable_real_device": False, + "replay_data_path": None, + } + ) + wbc_config = config.load_wbc_yaml() + wbc_config["upper_body_policy_type"] = "identity" + wbc_policy = get_wbc_policy(robot_type, robot_model, wbc_config, init_time=0.0) + self.total_dofs = len(robot_model.get_joint_group_indices("upper_body")) + wbc_policy.activate_policy() + return wbc_policy + + def _get_joint_group_size(self, group_name: str) -> int: + """Return the number of joints in a group, cached since lookup is static.""" + if not hasattr(self, "_joint_group_size_cache"): + self._joint_group_size_cache = {} + if group_name not in self._joint_group_size_cache: + self._joint_group_size_cache[group_name] = len( + self.robot_model.get_joint_group_indices(group_name) + ) + return self._joint_group_size_cache[group_name] + + def _wbc_action_space(self) -> 
spaces.Dict: + action_space: Dict[str, spaces.Space] = { + "action.navigate_command": spaces.Box( + low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32 + ), + "action.base_height_command": spaces.Box( + low=-np.inf, high=np.inf, shape=(1,), dtype=np.float32 + ), + "action.left_hand": spaces.Box( + low=-np.inf, + high=np.inf, + shape=(self._get_joint_group_size("left_hand"),), + dtype=np.float32, + ), + "action.right_hand": spaces.Box( + low=-np.inf, + high=np.inf, + shape=(self._get_joint_group_size("right_hand"),), + dtype=np.float32, + ), + "action.left_arm": spaces.Box( + low=-np.inf, + high=np.inf, + shape=(self._get_joint_group_size("left_arm"),), + dtype=np.float32, + ), + "action.right_arm": spaces.Box( + low=-np.inf, + high=np.inf, + shape=(self._get_joint_group_size("right_arm"),), + dtype=np.float32, + ), + } + if ( + "waist" + in self.robot_model.supplemental_info.joint_groups["upper_body_no_hands"]["groups"] # type: ignore[attr-defined] + ): + action_space["action.waist"] = spaces.Box( + low=-np.inf, + high=np.inf, + shape=(self._get_joint_group_size("waist"),), + dtype=np.float32, + ) + return spaces.Dict(action_space) + + +def concat_action(robot_model: RobotModel, goal: Dict[str, Any]) -> Dict[str, Any]: + """Combine individual joint-group targets into the upper-body action vector.""" + processed_goal = {} + for key, value in goal.items(): + processed_goal[key.replace("action.", "")] = value + + first_value = next(iter(processed_goal.values())) + action = np.zeros(first_value.shape[:-1] + (robot_model.num_dofs,)) + + action_dict = {} + action_dict["navigate_cmd"] = processed_goal.pop("navigate_command", DEFAULT_NAV_CMD) + action_dict["base_height_command"] = np.array( + processed_goal.pop("base_height_command", DEFAULT_BASE_HEIGHT) + ) + + for joint_group, value in processed_goal.items(): + indices = robot_model.get_joint_group_indices(joint_group) + action[..., indices] = value + + upper_body_indices = 
robot_model.get_joint_group_indices("upper_body") + action = action[..., upper_body_indices] + action_dict["target_upper_body_pose"] = action + return action_dict + + +def prepare_observation_for_eval(robot_model: RobotModel, obs: dict) -> dict: + """Add joint-group slices to an observation dict (real + sim evaluation helper).""" + assert "q" in obs, "q is not in the observation" + + whole_q = obs["q"] + assert whole_q.shape[-1] == robot_model.num_joints, "q has wrong shape" + + left_arm_q = whole_q[..., robot_model.get_joint_group_indices("left_arm")] + right_arm_q = whole_q[..., robot_model.get_joint_group_indices("right_arm")] + waist_q = whole_q[..., robot_model.get_joint_group_indices("waist")] + left_leg_q = whole_q[..., robot_model.get_joint_group_indices("left_leg")] + right_leg_q = whole_q[..., robot_model.get_joint_group_indices("right_leg")] + left_hand_q = whole_q[..., robot_model.get_joint_group_indices("left_hand")] + right_hand_q = whole_q[..., robot_model.get_joint_group_indices("right_hand")] + + obs["state.left_arm"] = left_arm_q + obs["state.right_arm"] = right_arm_q + obs["state.waist"] = waist_q + obs["state.left_leg"] = left_leg_q + obs["state.right_leg"] = right_leg_q + obs["state.left_hand"] = left_hand_q + obs["state.right_hand"] = right_hand_q + + return obs + + +def prepare_gym_space_for_eval( + robot_model: RobotModel, gym_space: gym.spaces.Dict +) -> gym.spaces.Dict: + """Extend a gym Dict space with the joint-group keys used during evaluation.""" + left_arm_space = spaces.Box( + low=-np.inf, + high=np.inf, + shape=(len(robot_model.get_joint_group_indices("left_arm")),), + ) + right_arm_space = spaces.Box( + low=-np.inf, + high=np.inf, + shape=(len(robot_model.get_joint_group_indices("right_arm")),), + ) + waist_space = spaces.Box( + low=-np.inf, + high=np.inf, + shape=(len(robot_model.get_joint_group_indices("waist")),), + ) + left_leg_space = spaces.Box( + low=-np.inf, + high=np.inf, + 
shape=(len(robot_model.get_joint_group_indices("left_leg")),), + ) + right_leg_space = spaces.Box( + low=-np.inf, + high=np.inf, + shape=(len(robot_model.get_joint_group_indices("right_leg")),), + ) + left_hand_space = spaces.Box( + low=-np.inf, + high=np.inf, + shape=(len(robot_model.get_joint_group_indices("left_hand")),), + ) + right_hand_space = spaces.Box( + low=-np.inf, + high=np.inf, + shape=(len(robot_model.get_joint_group_indices("right_hand")),), + ) + + gym_space["state.left_arm"] = left_arm_space + gym_space["state.right_arm"] = right_arm_space + gym_space["state.waist"] = waist_space + gym_space["state.left_leg"] = left_leg_space + gym_space["state.right_leg"] = right_leg_space + gym_space["state.left_hand"] = left_hand_space + gym_space["state.right_hand"] = right_hand_space + + return gym_space diff --git a/gr00t_wbc/control/utils/network_utils.py b/decoupled_wbc/control/utils/network_utils.py similarity index 100% rename from gr00t_wbc/control/utils/network_utils.py rename to decoupled_wbc/control/utils/network_utils.py diff --git a/decoupled_wbc/control/utils/ros_utils.py b/decoupled_wbc/control/utils/ros_utils.py new file mode 100644 index 0000000..f24890f --- /dev/null +++ b/decoupled_wbc/control/utils/ros_utils.py @@ -0,0 +1,201 @@ +import base64 +import signal +import threading +from typing import Optional + +import msgpack +import msgpack_numpy as mnp +import rclpy +from rclpy.executors import SingleThreadedExecutor +from rclpy.node import Node +from sensor_msgs.msg import Image +from std_msgs.msg import ByteMultiArray +from std_srvs.srv import Trigger + +_signal_registered = False + + +def register_keyboard_interrupt_handler(): + """ + Register a signal handler for SIGINT (Ctrl+C) and SIGTERM that raises KeyboardInterrupt. + This ensures consistent exception handling across different termination signals. 
+ """ + global _signal_registered + if not _signal_registered: + + def signal_handler(signum, frame): + raise KeyboardInterrupt + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + _signal_registered = True + + +class ROSManager: + """ + Manages the ROS2 node and executor. + + Usage example: + ```python + def main(): + ros_manager = ROSManager() + node = ros_manager.node + + try: + while ros_manager.ok(): + time.sleep(0.1) + except ros_manager.exceptions() as e: + print(f"ROSManager interrupted by user: {e}") + finally: + ros_manager.shutdown() + ``` + """ + + def __init__(self, node_name: str = "ros_manager"): + if not rclpy.ok(): + rclpy.init() + self.node = rclpy.create_node(node_name) + self.thread = threading.Thread(target=rclpy.spin, args=(self.node,), daemon=True) + self.thread.start() + else: + executor = rclpy.get_global_executor() + if len(executor.get_nodes()) > 0: + self.node = executor.get_nodes()[0] + else: + self.node = rclpy.create_node(node_name) + + register_keyboard_interrupt_handler() + + @staticmethod + def ok(): + return rclpy.ok() + + @staticmethod + def shutdown(): + if rclpy.ok(): + rclpy.shutdown() + + @staticmethod + def exceptions(): + return (rclpy.exceptions.ROSInterruptException, KeyboardInterrupt) + + +class ROSMsgPublisher: + """ + Publishes any serializable dict to a topic. + """ + + def __init__(self, topic_name: str): + ros_manager = ROSManager() + self.node = ros_manager.node + self.publisher = self.node.create_publisher(ByteMultiArray, topic_name, 1) + + def publish(self, msg: dict): + payload = msgpack.packb(msg, default=mnp.encode) + payload = tuple(bytes([a]) for a in payload) + msg = ByteMultiArray() + msg.data = payload + self.publisher.publish(msg) + + +class ROSMsgSubscriber: + """ + Subscribes to any topics published by a ROSMsgPublisher. 
+ """ + + def __init__(self, topic_name: str): + ros_manager = ROSManager() + self.node = ros_manager.node + self._msg = None + self.subscription = self.node.create_subscription( + ByteMultiArray, topic_name, self._callback, 1 + ) + + def _callback(self, msg: ByteMultiArray): + self._msg = msg + + def get_msg(self) -> Optional[dict]: + msg = self._msg + if msg is None: + return None + self._msg = None + return msgpack.unpackb(bytes([ab for a in msg.data for ab in a]), object_hook=mnp.decode) + + +class ROSImgMsgSubscriber: + """ + Subscribes to an `Image` topic and returns the image as a numpy array and timestamp. + """ + + def __init__(self, topic_name: str): + ros_manager = ROSManager() + self.node = ros_manager.node + self._msg = None + self.subscription = self.node.create_subscription(Image, topic_name, self._callback, 1) + + from decoupled_wbc.control.utils.cv_bridge import CvBridge + + self.bridge = CvBridge() + + def _callback(self, msg: Image): + self._msg = msg + + def get_image(self) -> Optional[dict]: + """ + Returns the image as a numpy array and the timestamp. + """ + + msg = self._msg + if msg is None: + return None + return { + "image": self.bridge.imgmsg_to_cv2(msg), + "timestamp": msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9, + } + + +class ROSServiceServer: + """ + Generic ROS2 Service server that stores and serves a config dict. 
+ """ + + def __init__(self, service_name: str, config: dict): + ros_manager = ROSManager() + self.node = ros_manager.node + packed = msgpack.packb(config, default=mnp.encode) + self.message = base64.b64encode(packed).decode("ascii") + self.server = self.node.create_service(Trigger, service_name, self._callback) + + def _callback(self, request, response): + try: + response.success = True + response.message = self.message + print("Sending encoded message of length:", len(response.message)) + except Exception as e: + response.success = False + response.message = str(e) + return response + + +class ROSServiceClient(Node): + + def __init__(self, service_name: str, node_name: str = "service_client"): + super().__init__(node_name) + self.cli = self.create_client(Trigger, service_name) + while not self.cli.wait_for_service(timeout_sec=1.0): + self.get_logger().info("service not available, waiting again...") + self.req = Trigger.Request() + + def get_config(self): + future = self.cli.call_async(self.req) + executor = SingleThreadedExecutor() + executor.add_node(self) + executor.spin_until_future_complete(future, timeout_sec=1.0) + executor.remove_node(self) + executor.shutdown() + result = future.result() + if result.success: + decoded = base64.b64decode(result.message.encode("ascii")) + return msgpack.unpackb(decoded, object_hook=mnp.decode) + else: + raise RuntimeError(f"Service call failed: {result.message}") diff --git a/gr00t_wbc/control/utils/run_real_checklist.py b/decoupled_wbc/control/utils/run_real_checklist.py similarity index 100% rename from gr00t_wbc/control/utils/run_real_checklist.py rename to decoupled_wbc/control/utils/run_real_checklist.py diff --git a/gr00t_wbc/control/utils/service.py b/decoupled_wbc/control/utils/service.py similarity index 100% rename from gr00t_wbc/control/utils/service.py rename to decoupled_wbc/control/utils/service.py diff --git a/decoupled_wbc/control/utils/sync_sim_utils.py b/decoupled_wbc/control/utils/sync_sim_utils.py new 
file mode 100644 index 0000000..ab02db5 --- /dev/null +++ b/decoupled_wbc/control/utils/sync_sim_utils.py @@ -0,0 +1,590 @@ +from datetime import datetime +import pathlib +import shutil +from typing import Any, Dict, Optional + +import numpy as np + +from decoupled_wbc.control.envs.robocasa.sync_env import G1SyncEnv, SyncEnv +from decoupled_wbc.control.envs.robocasa.utils.controller_utils import ( + get_body_ik_solver_settings_type, + update_robosuite_controller_configs, +) +from decoupled_wbc.control.main.constants import DEFAULT_BASE_HEIGHT, DEFAULT_NAV_CMD +from decoupled_wbc.control.main.teleop.configs.configs import SyncSimDataCollectionConfig +from decoupled_wbc.control.policy.teleop_policy import TeleopPolicy +from decoupled_wbc.control.policy.wbc_policy_factory import get_wbc_policy +from decoupled_wbc.control.robot_model.instantiation import get_robot_type_and_model +from decoupled_wbc.control.robot_model.robot_model import RobotModel +from decoupled_wbc.control.teleop.solver.hand.instantiation.g1_hand_ik_instantiation import ( + instantiate_g1_hand_ik_solver, +) +from decoupled_wbc.control.teleop.teleop_retargeting_ik import TeleopRetargetingIK +from decoupled_wbc.control.utils.episode_state import EpisodeState +from decoupled_wbc.control.utils.text_to_speech import TextToSpeech +from decoupled_wbc.data.exporter import Gr00tDataExporter +from decoupled_wbc.data.utils import get_dataset_features, get_modality_config + +MAX_MUJOCO_STATE_LEN = 800 +COLLECTION_KEY = "c" +SKIP_KEY = "x" + + +class EpisodeManager: + """Manages episode state transitions, done flags, and task completion hold counts. 
+ + This class encapsulates the logic for: + - Episode state management (IDLE -> RECORDING -> NEED_TO_SAVE) + - Done flag handling + - Task completion hold count tracking + - Data collection triggering based on manual/auto mode + - Step counting within episodes + """ + + def __init__(self, config: SyncSimDataCollectionConfig): + self.config = config + self.task_completion_hold_count = -1 + self.done = False + self.step_count = 0 + + # Initialize episode state and text-to-speech for both manual and automatic modes + self.episode_state = EpisodeState() + self.text_to_speech = TextToSpeech() + + def should_collect_data(self) -> bool: + """Determine if data should be collected at this timestep.""" + if not self.config.data_collection: + return False + + if self.config.manual_control: + # Manual mode: only collect when RECORDING or NEED_TO_SAVE + return self.episode_state.get_state() in [ + self.episode_state.RECORDING, + self.episode_state.NEED_TO_SAVE, + ] + else: + # Auto mode: collect when RECORDING or NEED_TO_SAVE + return self.episode_state.get_state() in [ + self.episode_state.RECORDING, + self.episode_state.NEED_TO_SAVE, + ] + + def increment_step(self): + """Increment the step counter.""" + self.step_count += 1 + + def reset_step_count(self): + """Reset the step counter to 0.""" + self.step_count = 0 + + def get_step_count(self) -> int: + """Get the current step count.""" + return self.step_count + + def handle_collection_trigger( + self, wbc_goal: dict, keyboard_input: str | None, step_info: dict + ): + """Handle data collection start/stop triggers. 
+ + Args: + wbc_goal: WBC goal dictionary + keyboard_input: Keyboard input from user (can be None) + step_info: Step information from environment containing success flag + """ + self.done = False + state_changed = False + + if self.config.manual_control: + if wbc_goal.get("toggle_data_collection", False) or ( + keyboard_input and keyboard_input == COLLECTION_KEY + ): + self.episode_state.change_state() + # IDLE -> RECORDING or RECORDING -> NEED_TO_SAVE + state_changed = True + else: + # Auto mode: automatically start collecting data for new env + if self.episode_state.get_state() == self.episode_state.IDLE: + self.episode_state.change_state() # IDLE -> RECORDING + state_changed = True + self.done = step_info.get("success", False) + if ( + self.episode_state.get_state() == self.episode_state.RECORDING + and self.done + and self.task_completion_hold_count == 0 + ): + self.episode_state.change_state() # RECORDING -> NEED_TO_SAVE + state_changed = True + + # Check for CI test completion + if self.config.ci_test and self.step_count >= self._get_ci_test_steps(): + self.episode_state.change_state() # RECORDING -> NEED_TO_SAVE + state_changed = True + + if state_changed: + if self.episode_state.get_state() == self.episode_state.RECORDING: + self.text_to_speech.print_and_say("Started recording episode") + elif self.episode_state.get_state() == self.episode_state.NEED_TO_SAVE: + self.done = True + self.task_completion_hold_count = 0 + self.text_to_speech.print_and_say("Stopping recording, preparing to save") + + def check_export_and_completion(self, exporter: Gr00tDataExporter) -> bool: + """Check if episode should be exported and update completion state. 
+ + Args: + exporter: Data exporter instance + + Returns: + bool: True if environment needs to be reset + """ + need_reset = False + + # Check if we should save the episode + if self.task_completion_hold_count == 0: + exporter.save_episode() + need_reset = True + self.task_completion_hold_count = -1 + + if self.episode_state.get_state() == self.episode_state.NEED_TO_SAVE: + self.episode_state.change_state() # NEED_TO_SAVE -> IDLE + self.text_to_speech.print_and_say("Episode saved.") + + # State machine to check for having a success for N consecutive timesteps + elif self.done: + print( + f"Task success detected! Will collect {self.config.success_hold_steps} additional steps..." + ) + print(f"currently {self.task_completion_hold_count}") + if self.task_completion_hold_count > 0: + self.task_completion_hold_count -= 1 # latched state, decrement count + print(f"Task completed! Collecting {self.task_completion_hold_count} more steps...") + else: + self.task_completion_hold_count = ( + self.config.success_hold_steps + ) # reset count on first success timestep + print( + f"Task success detected! Will collect {self.config.success_hold_steps} additional steps..." + ) + else: + self.task_completion_hold_count = -1 # null the counter if there's no success + + return need_reset + + def handle_skip( + self, wbc_goal: dict, keyboard_input: str | None, exporter: Gr00tDataExporter + ) -> bool: + """Handle episode skip/abort. 
+ + Args: + wbc_goal: WBC goal dictionary + keyboard_input: Keyboard input from user (can be None) + exporter: Data exporter instance + + Returns: + bool: True if episode was skipped and needs reset + """ + if wbc_goal.get("toggle_data_abort", False) or ( + keyboard_input and keyboard_input == SKIP_KEY + ): + exporter.skip_and_start_new_episode() + self.episode_state.reset_state() + self.text_to_speech.print_and_say("Episode discarded, starting new episode") + return True + return False + + def _get_ci_test_steps(self) -> int: + """Get CI test steps based on CI test mode.""" + if self.config.get("ci_test_mode", "unit") == "unit": + return 50 + else: # pre_merge + return 500 + + +class CITestManager: + def __init__(self, config: SyncSimDataCollectionConfig): + self.config = config + self.ci_test_steps = 50 if config.ci_test_mode == "unit" else 500 + self.enable_tracking_check = True if config.ci_test_mode == "pre_merge" else False + self.upper_body_speed_hist = [] + self.last_q_upper_body = None + self.end_effector_tracking_errors = [] + + def check_upper_body_motion( + self, robot_model: RobotModel, wbc_action: dict, config: SyncSimDataCollectionConfig + ): + upper_body_joint_indices = robot_model.get_joint_group_indices("upper_body") + q_upper_body = wbc_action["q"][upper_body_joint_indices] + if self.last_q_upper_body is None: + self.last_q_upper_body = q_upper_body.copy() + self.upper_body_speed_hist.append( + np.abs(q_upper_body - self.last_q_upper_body).mean() * config.control_frequency + ) + self.last_q_upper_body = q_upper_body.copy() + assert q_upper_body.mean() != 0, "Upper body joints should not be zero" + + def check_end_effector_tracking( + self, teleop_cmd: dict, obs: dict, config: SyncSimDataCollectionConfig, i: int + ): + """ + Check end effector tracking error and validate thresholds. 
+ + Args: + teleop_cmd: Teleoperation command containing target poses + obs: Environment observation containing current poses + end_effector_tracking_errors: List to store tracking errors + config: Configuration object containing robot type + i: Current step index + ci_test_steps: Number of steps for CI test + upper_body_speed_hist: History of upper body joint speeds + """ + from scipy.spatial.transform import Rotation as R + + # Get target poses from teleop command (replay data format) + target_left_wrist = teleop_cmd.get("left_wrist") + target_right_wrist = teleop_cmd.get("right_wrist") + + wrist_pose = obs.get("wrist_pose") + + # Extract left and right wrist poses from environment observation (7 for left + 7 for right) + left_pos = wrist_pose[:3] + left_quat = wrist_pose[3:7] + right_pos = wrist_pose[7:10] + right_quat = wrist_pose[10:14] + + # Convert quaternions to rotation matrices for error calculation + left_rot_matrix = R.from_quat(left_quat, scalar_first=True).as_matrix() + right_rot_matrix = R.from_quat(right_quat, scalar_first=True).as_matrix() + + # Construct 4x4 transformation matrices + actual_left_wrist = np.eye(4) + actual_left_wrist[:3, 3] = left_pos + actual_left_wrist[:3, :3] = left_rot_matrix + + actual_right_wrist = np.eye(4) + actual_right_wrist[:3, 3] = right_pos + actual_right_wrist[:3, :3] = right_rot_matrix + + # Calculate position error + left_pos_error = np.linalg.norm(target_left_wrist[:3, 3] - actual_left_wrist[:3, 3]) + right_pos_error = np.linalg.norm(target_right_wrist[:3, 3] - actual_right_wrist[:3, 3]) + + # Calculate rotation error (similar to test_teleop_retargeting_ik) + left_rot_diff = actual_left_wrist[:3, :3] @ target_left_wrist[:3, :3].T + left_rot_error = np.arccos(np.clip((np.trace(left_rot_diff) - 1) / 2, -1, 1)) + right_rot_diff = actual_right_wrist[:3, :3] @ target_right_wrist[:3, :3].T + right_rot_error = np.arccos(np.clip((np.trace(right_rot_diff) - 1) / 2, -1, 1)) + + # Store max error for this timestep + 
max_pos_error = max(left_pos_error, right_pos_error) + max_rot_error = max(left_rot_error, right_rot_error) + self.end_effector_tracking_errors.append((max_pos_error, max_rot_error)) + + if i >= self.ci_test_steps: + max_pos_errors = [error[0] for error in self.end_effector_tracking_errors] + max_rot_errors = [error[1] for error in self.end_effector_tracking_errors] + + max_pos_error = np.max(max_pos_errors) + max_rot_error = np.max(max_rot_errors) + + average_pos_error = np.mean(max_pos_errors) + average_rot_error = np.mean(max_rot_errors) + + # More realistic thresholds based on observed data + max_pos_threshold = 0.07 # 7cm threshold + max_rot_threshold = np.deg2rad(17) # 17 degree threshold + average_pos_threshold = 0.05 # 5cm threshold + average_rot_threshold = np.deg2rad(12) # 12 degree threshold + + print(f" Position errors - Max: {max_pos_error:.4f}") + print(f" Rotation errors - Max: {np.rad2deg(max_rot_error):.2f}°") + print(f" Average position error: {average_pos_error:.4f}") + print(f" Average rotation error: {np.rad2deg(average_rot_error):.2f}°") + + assert ( + max_pos_error < max_pos_threshold + ), "Maximum end effector position tracking error exceeds threshold" + assert ( + max_rot_error < max_rot_threshold + ), "Maximum end effector rotation tracking error exceeds threshold" + assert ( + average_pos_error < average_pos_threshold + ), "Average end effector position tracking error exceeds threshold" + assert ( + average_rot_error < average_rot_threshold + ), "Average end effector rotation tracking error exceeds threshold" + + assert ( + np.array(self.upper_body_speed_hist).mean() > 0.03 + ), "Mean upper body joint velocities should be larger. Robot might not be moving." 
+ + print("End effector tracking validation passed.") + + # Ensure end effector tracking validation has run + if not self.end_effector_tracking_errors: + assert False, "No end effector tracking data collected during CI test" + elif len(self.end_effector_tracking_errors) < self.ci_test_steps: + assert ( + False + ), f"Only {len(self.end_effector_tracking_errors)} end effector tracking samples collected" + + +def get_features( + robot_model: RobotModel, save_img_obs: bool = False, image_obs_configs: dict[str, dict] = {} +) -> dict[str, dict]: + """Fixture providing test features dict.""" + features = get_dataset_features(robot_model) + features.update( + { + "observation.sim.seed": { + "dtype": "int32", + "shape": (1,), + }, + "observation.sim.max_mujoco_state_len": { + "dtype": "int32", + "shape": (1,), + }, + "observation.sim.mujoco_state_len": { + "dtype": "int32", + "shape": (1,), + }, + "observation.sim.mujoco_state": { + "dtype": "float64", + "shape": (MAX_MUJOCO_STATE_LEN,), + }, + "observation.sim.left_wrist": { + "dtype": "float64", + "shape": (16,), + }, + "observation.sim.right_wrist": { + "dtype": "float64", + "shape": (16,), + }, + "observation.sim.left_fingers": { + "dtype": "float64", + "shape": (400,), + }, + "observation.sim.right_fingers": { + "dtype": "float64", + "shape": (400,), + }, + "observation.sim.target_upper_body_pose": { + "dtype": "float64", + "shape": (len(robot_model.get_joint_group_indices("upper_body")),), + }, + # TODO: support different reduced robot models + } + ) + + features.pop("observation.img_state_delta") + features.pop("observation.images.ego_view") + + if save_img_obs: + for key, value in image_obs_configs.items(): + features.update( + { + f"observation.images.{key.replace('_image', '')}": { + "dtype": "video", + "shape": value["shape"], + "names": ["height", "width", "channel"], + } + } + ) + + return features + + +def generate_frame( + obs: Dict[str, Any], + wbc_action: Dict[str, Any], + seed: int, + mujoco_state: 
np.ndarray, + mujoco_state_len: int, + max_mujoco_state_len: int, + teleop_cmd: Dict[str, Any], + wbc_goal: Dict[str, Any], + save_img_obs: bool = False, +): + frame = { + "observation.state": np.array(obs["q"], dtype=np.float64), + "observation.eef_state": np.array(obs["wrist_pose"], dtype=np.float64), + "action": np.array(wbc_action["q"], dtype=np.float64), + "action.eef": np.array(wbc_goal["wrist_pose"], dtype=np.float64), + "teleop.navigate_command": np.array( + wbc_goal.get("navigate_cmd", DEFAULT_NAV_CMD), dtype=np.float64 + ), + "teleop.base_height_command": np.array( + teleop_cmd.get("base_height_command", [DEFAULT_BASE_HEIGHT]), dtype=np.float64 + ).reshape( + 1, + ), + "observation.sim.seed": np.array([seed], dtype=np.int32), + "observation.sim.mujoco_state_len": np.array([mujoco_state_len], dtype=np.int32), + "observation.sim.max_mujoco_state_len": np.array([max_mujoco_state_len], dtype=np.int32), + "observation.sim.mujoco_state": mujoco_state.astype(np.float64), + "observation.sim.left_wrist": teleop_cmd["left_wrist"].flatten().astype(np.float64), + "observation.sim.right_wrist": teleop_cmd["right_wrist"].flatten().astype(np.float64), + "observation.sim.left_fingers": teleop_cmd["left_fingers"]["position"] + .flatten() + .astype(np.float64), + "observation.sim.right_fingers": teleop_cmd["right_fingers"]["position"] + .flatten() + .astype(np.float64), + "observation.sim.target_upper_body_pose": wbc_goal["target_upper_body_pose"].astype( + np.float64 + ), + # "observation.sim.target_time": np.array([wbc_goal["target_time"]], dtype=np.float64), + # "observation.sim.interpolation_garbage_collection_time": np.array( + # [wbc_goal["interpolation_garbage_collection_time"]], dtype=np.float64 + # ), + } + + if save_img_obs: + for key, value in obs.items(): + if key.endswith("image"): + frame[f"observation.images.{key.replace('_image', '')}"] = value + return frame + + +def get_data_exporter( + config: SyncSimDataCollectionConfig, + obs: Dict[str, Any], + 
robot_model: RobotModel, + save_path: Optional[pathlib.Path] = None, +) -> Gr00tDataExporter: + if save_path is None: + save_path = pathlib.Path( + f"./outputs/{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}-{config.robot}-sim-{config.task_name}/" + ) + + if config.remove_existing_dir: + if save_path.exists(): + shutil.rmtree(save_path) + + image_obs_configs = {} + for key, value in obs.items(): + if key.endswith("image"): + image_obs_configs[key] = { + "dtype": "uint8", + "shape": value.shape, + } + + # TODO: use standardized keys for training dataset + modality_config = get_modality_config(robot_model) + exporter = Gr00tDataExporter.create( + save_root=save_path, + fps=config.control_frequency, + features=get_features( + robot_model, save_img_obs=config.save_img_obs, image_obs_configs=image_obs_configs + ), + modality_config=modality_config, + task=config.task_name, + script_config=config, + robot_type=config.robot, + vcodec="libx264", # Use a common codec that should be available + ) + return exporter + + +def get_env(config: SyncSimDataCollectionConfig, **kwargs) -> SyncEnv: + robot_type, _ = get_robot_type_and_model(config.robot, enable_waist_ik=config.enable_waist) + print("Instantiating environment:", config.env_name, config.robot) + controller_configs = update_robosuite_controller_configs( + robot=config.robot, + wbc_version=config.wbc_version, + enable_gravity_compensation=config.enable_gravity_compensation, + ) + + kwargs.update( + { + "ik_indicator": config.ik_indicator, + "control_freq": config.control_frequency, + "renderer": config.renderer, + "controller_configs": controller_configs, + "enable_waist": config.enable_waist, + "enable_gravity_compensation": config.enable_gravity_compensation, + "gravity_compensation_joints": config.gravity_compensation_joints, + } + ) + if robot_type == "g1": + env_type = G1SyncEnv + else: + raise ValueError(f"Unsupported robot type: {robot_type}") + + env = env_type( + env_name=config.task_name, # TODO: should merge 
with config.env_name + robot_name=config.robot, + **kwargs, + ) + return env + + +def get_env_name(robot: str, task_name: str, enable_waist_ik: bool = False) -> str: + robot_type, _ = get_robot_type_and_model(robot, enable_waist_ik=enable_waist_ik) + env_name = f"gr00tlocomanip_{robot_type}_sim/{task_name}_{robot}_Env" + return env_name + + +def get_body_teleoped_joint_groups(robot: str) -> list[str]: + robot2body_teleoped_joint_groups = { + "G1FixedLowerBody": ["arms"], + "G1FixedBase": ["arms"], + "G1ArmsOnly": ["arms"], + "G1": ["upper_body"], + } + return robot2body_teleoped_joint_groups[robot] + + +def get_teleop_policy( + robot_type: str, + robot_model: RobotModel, + config: SyncSimDataCollectionConfig, + activate_keyboard_listener: bool = True, +) -> TeleopPolicy: + if robot_type == "g1": + left_hand_ik_solver, right_hand_ik_solver = instantiate_g1_hand_ik_solver() + else: + raise ValueError(f"Invalid robot type: {robot_type}") + + # Initializing the teleop policy will block the main process until the Leap Motion is ready. 
+ retargeting_ik = TeleopRetargetingIK( + robot_model=robot_model, + left_hand_ik_solver=left_hand_ik_solver, + right_hand_ik_solver=right_hand_ik_solver, + enable_visualization=config.enable_visualization, + body_active_joint_groups=get_body_teleoped_joint_groups(config.robot), + body_ik_solver_settings_type=get_body_ik_solver_settings_type(config.robot), + ) + teleop_policy = TeleopPolicy( + robot_model=robot_model, + retargeting_ik=retargeting_ik, + body_control_device=config.body_control_device, + hand_control_device=config.hand_control_device, + body_streamer_ip=config.body_streamer_ip, + body_streamer_keyword=config.body_streamer_keyword, + enable_real_device=config.enable_real_device, + replay_data_path=config.replay_data_path, + replay_speed=config.replay_speed, + activate_keyboard_listener=activate_keyboard_listener, + ) + return teleop_policy + + +def get_wbc_config(config: SyncSimDataCollectionConfig): + wbc_config = config.load_wbc_yaml() + wbc_config["upper_body_policy_type"] = "identity" + return wbc_config + + +def get_policies( + config: SyncSimDataCollectionConfig, + robot_type: str, + robot_model: RobotModel, + activate_keyboard_listener: bool = True, +): + wbc_config = get_wbc_config(config) + wbc_policy = get_wbc_policy(robot_type, robot_model, wbc_config, init_time=0.0) + wbc_policy.activate_policy() + teleop_policy = get_teleop_policy(robot_type, robot_model, config, activate_keyboard_listener) + if not config.manual_control: + teleop_policy.activate_policy() + return wbc_policy, teleop_policy diff --git a/gr00t_wbc/control/utils/telemetry.py b/decoupled_wbc/control/utils/telemetry.py similarity index 100% rename from gr00t_wbc/control/utils/telemetry.py rename to decoupled_wbc/control/utils/telemetry.py diff --git a/gr00t_wbc/control/utils/term_color_constants.py b/decoupled_wbc/control/utils/term_color_constants.py similarity index 100% rename from gr00t_wbc/control/utils/term_color_constants.py rename to 
decoupled_wbc/control/utils/term_color_constants.py diff --git a/gr00t_wbc/control/utils/text_to_speech.py b/decoupled_wbc/control/utils/text_to_speech.py similarity index 100% rename from gr00t_wbc/control/utils/text_to_speech.py rename to decoupled_wbc/control/utils/text_to_speech.py diff --git a/decoupled_wbc/control/visualization/humanoid_visualizer.py b/decoupled_wbc/control/visualization/humanoid_visualizer.py new file mode 100644 index 0000000..815af25 --- /dev/null +++ b/decoupled_wbc/control/visualization/humanoid_visualizer.py @@ -0,0 +1,52 @@ +import time + +import meshcat_shapes +import numpy as np +from pinocchio.visualize import MeshcatVisualizer + +from decoupled_wbc.control.robot_model import RobotModel +from decoupled_wbc.control.robot_model.instantiation.g1 import instantiate_g1_robot_model + + +class RobotVisualizer: + def __init__(self, robot: RobotModel): + self.robot = robot + self.viz = MeshcatVisualizer( + self.robot.pinocchio_wrapper.model, + self.robot.pinocchio_wrapper.collision_model, + self.robot.pinocchio_wrapper.visual_model, + ) + try: + self.viz.initViewer(open=True) + + except ImportError as err: + print("Error while initializing the viewer. 
It seems you should install Python meshcat") + print(err) + exit(0) + + self.viz.loadViewerModel() + self.viz.display(self.robot.q_zero) + + # Visualize frames + self.viz_frames = [self.robot.supplemental_info.root_frame_name] + for side in ["left", "right"]: + self.viz_frames.append(self.robot.supplemental_info.hand_frame_names[side]) + for frame in self.viz_frames: + meshcat_shapes.frame(self.viz.viewer[frame], opacity=1.0) + + def visualize(self, robot_state: np.ndarray): + # visualize robot state + if robot_state is not None: + self.robot.cache_forward_kinematics(robot_state, auto_clip=False) + self.viz.display(robot_state) + for frame_name in self.viz_frames: + self.viz.viewer[frame_name].set_transform(self.robot.frame_placement(frame_name).np) + + +if __name__ == "__main__": + # robot_model = instantiate_gr1_robot_model() + robot_model = instantiate_g1_robot_model() + visualizer = RobotVisualizer(robot_model) + while True: + visualizer.visualize(robot_model.q_zero) + time.sleep(0.01) diff --git a/decoupled_wbc/control/visualization/meshcat_visualizer_env.py b/decoupled_wbc/control/visualization/meshcat_visualizer_env.py new file mode 100644 index 0000000..c3981d7 --- /dev/null +++ b/decoupled_wbc/control/visualization/meshcat_visualizer_env.py @@ -0,0 +1,77 @@ +from contextlib import contextmanager +import time + +import gymnasium as gym +import numpy as np +from pinocchio.visualize import MeshcatVisualizer + +from decoupled_wbc.control.base.env import Env +from decoupled_wbc.control.robot_model import RobotModel + + +class MeshcatVisualizerEnv(Env): + def __init__(self, robot_model: RobotModel): + self.robot_model = robot_model + self.viz = MeshcatVisualizer( + self.robot_model.pinocchio_wrapper.model, + self.robot_model.pinocchio_wrapper.collision_model, + self.robot_model.pinocchio_wrapper.visual_model, + ) + try: + self.viz.initViewer(open=True) + + except ImportError as err: + print("Error while initializing the viewer. 
It seems you should install Python meshcat") + print(err) + exit(0) + + self.viz.loadViewerModel() + self.visualize(self.robot_model.pinocchio_wrapper.q0) + time.sleep(1.0) + + self._observation_space = gym.spaces.Dict( + { + "q": gym.spaces.Box( + low=-2 * np.pi, high=2 * np.pi, shape=(self.robot_model.num_dofs,) + ) + } + ) + self._action_space = gym.spaces.Dict( + { + "q": gym.spaces.Box( + low=-2 * np.pi, high=2 * np.pi, shape=(self.robot_model.num_dofs,) + ) + } + ) + + def visualize(self, robot_state: np.ndarray): + # visualize robot state + if robot_state is not None: + self.viz.display(robot_state) + + def observe(self): + # Dummy observation + return {"q": self.robot_model.pinocchio_wrapper.q0} + + def queue_action(self, action: dict[str, np.ndarray]): + self.visualize(action["q"]) + + def reset(self, **kwargs): + self.visualize(self.robot_model.pinocchio_wrapper.q0) + return {"q": self.robot_model.pinocchio_wrapper.q0} + + def sensors(self) -> dict[str, any]: + return {} + + def observation_space(self) -> gym.Space: + return self._observation_space + + def action_space(self) -> gym.Space: + return self._action_space + + def close(self): + return + + @contextmanager + def activate(self): + yield diff --git a/gr00t_wbc/data/constants.py b/decoupled_wbc/data/constants.py similarity index 100% rename from gr00t_wbc/data/constants.py rename to decoupled_wbc/data/constants.py diff --git a/decoupled_wbc/data/exporter.py b/decoupled_wbc/data/exporter.py new file mode 100644 index 0000000..4bb4d2e --- /dev/null +++ b/decoupled_wbc/data/exporter.py @@ -0,0 +1,514 @@ +import copy +from dataclasses import dataclass +from functools import partial +import json +import os +from pathlib import Path +import shutil +from typing import Optional + +import datasets +from datasets import load_dataset +from datasets.utils import disable_progress_bars +from huggingface_hub.errors import RepositoryNotFoundError +from lerobot.common.datasets.lerobot_dataset import ( + 
LeRobotDataset, + LeRobotDatasetMetadata, + compute_episode_stats, +) +from lerobot.common.datasets.utils import ( + check_timestamps_sync, + get_episode_data_index, + validate_episode_buffer, + validate_frame, +) +import numpy as np +from PIL import Image as PILImage +import torch +from torchvision import transforms + +from decoupled_wbc.control.main.config_template import ArgsConfig +from decoupled_wbc.data.video_writer import VideoWriter + +disable_progress_bars() # Disable HuggingFace progress bars + + +@dataclass +class DataCollectionInfo: + """ + This dataclass stores additional information that is relevant to the data collection process. + """ + + lower_body_policy: Optional[str] = None + wbc_model_path: Optional[str] = None + teleoperator_username: Optional[str] = None + support_operator_username: Optional[str] = None + robot_type: Optional[str] = None + robot_id: Optional[str] = None + + def to_dict(self) -> dict: + """Convert the dataclass to a dictionary for JSON serialization.""" + return { + "lower_body_policy": self.lower_body_policy, + "wbc_model_path": self.wbc_model_path, + "teleoperator_username": self.teleoperator_username, + "support_operator_username": self.support_operator_username, + "robot_type": self.robot_type, + "robot_id": self.robot_id, + } + + @classmethod + def from_dict(cls, data: dict) -> "DataCollectionInfo": + """Create a DataCollectionInfo instance from a dictionary.""" + return cls(**data) + + +class Gr00tDatasetMetadata(LeRobotDatasetMetadata): + """ + Additional metadata on top of LeRobotDatasetMetadata: + - modality_config: Written to `meta/modality.json` + - discarded_episode_indices: List of episode indices that were discarded. 
Written to `meta/info.json` + """ + + MODALITY_CONFIG_REL_PATH = Path("meta/modality.json") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + with open(self.root / self.MODALITY_CONFIG_REL_PATH, "rb") as f: + self.modality_config = json.load(f) + + @classmethod + def create( + cls, + modality_config: dict, + script_config: dict, + data_collection_info: DataCollectionInfo, + *args, + **kwargs, + ): + cls.validate_modality_config(modality_config) + + # Create base metadata object using parent class + obj = super().create(*args, **kwargs) + + # we also need to initialize the discarded_episode_indices + obj.info["script_config"] = script_config + obj.info["discarded_episode_indices"] = [] + obj.info["data_collection_info"] = data_collection_info.to_dict() + with open(obj.root / "meta" / "info.json", "w") as f: + json.dump(obj.info, f, indent=4) + + obj.__class__ = cls + with open(obj.root / cls.MODALITY_CONFIG_REL_PATH, "w") as f: + json.dump(modality_config, f, indent=4) + obj.modality_config = modality_config + return obj + + @staticmethod + def validate_modality_config(modality_config: dict) -> None: + # verify if it contains all state, action, video, annotation keys + valid_keys = ["state", "action", "video", "annotation"] + if not all(key in modality_config for key in valid_keys): + raise ValueError( + f"Modality config must contain all of the following keys: {valid_keys}" + ) + + # verify that each key has a modality_config dict + for key in valid_keys: + if key not in modality_config: + raise ValueError(f"Modality config must contain a '{key}' key") + + +class Gr00tDataExporter(LeRobotDataset): + """ + A class for exporting data collected for a single session to LeRobot Dataset. + + Intended life cycle: + 1. Create a Gr00tDataExporter object + 2. Add frames using add_frame() + 3. 
Save the episode using save_episode() + - This will flush the episode buffer to disk + - This will also close the video writers + - Create a new video writer and ep buffer to start new episode + + If interrupted, here's the indented behavior: + - Interruption before save_episode() is called: loses the current episode + - Interruption after save_episode() is called: keeps completed episodes + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.video_writers = self.create_video_writer() + + @property + def repo_id(self): + return self.meta.repo_id + + @property + def root(self): + return self.meta.root + + @property + def local_files_only(self): + return self.meta.local_files_only + + @property + def video_keys(self): + return self.meta.video_keys + + @classmethod + def create( + cls, + save_root: str | Path, + fps: int, + features: dict, + modality_config: dict, + task: str, + script_config: ArgsConfig = ArgsConfig(), + data_collection_info: DataCollectionInfo = DataCollectionInfo(), + robot_type: str | None = None, + tolerance_s: float = 1e-4, + vcodec: str = "h264", + overwrite_existing: bool = False, + upload_bucket_path: str | None = None, + ) -> "Gr00tDataExporter": + """ + Create a Gr00tDataExporter object. + + Args: + save_root: The root directory to save the dataset. + fps: The frame rate of the dataset. + features: The features of the dataset. + modality_config: The modality config of the dataset. + task: The task performed during the data collection session. + data_collection_info: The data collection info. + If the dataset already exists, this argument will be ignored. + If data_collection_info is not provided, it will be set to an empty DataCollectionInfo object. + robot_type: The type of robot. + tolerance_s: The tolerance for the dataset. + image_writer_processes: The number of processes to use for the image writer. + image_writer_threads: The number of threads to use for the image writer. 
+ vcodec: The codec to use for the video writer. + """ + + obj = cls.__new__(cls) + repo_id = ( + "tmp/tmp_dataset" # NOTE(fengyuanh): Not relevant since we are not pushing to the hub + ) + if overwrite_existing and (Path(save_root)).exists(): + print( + f"Found existing dataset at {save_root}", + "Cleaning up this directory since overwrite_existing is True.", + ) + shutil.rmtree(save_root) + + if (Path(save_root)).exists(): + # Try to resume from existing dataset + try: + # Load the metadata + obj.meta = Gr00tDatasetMetadata( + repo_id=repo_id, + root=save_root, + ) + + except RepositoryNotFoundError as e: + raise ValueError( + f"Failed to resume from corrupted dataset. Please manually check the dataset at {save_root}" + ) from e + else: + if not isinstance(script_config, dict): + script_config = script_config.to_dict() + obj.meta = Gr00tDatasetMetadata.create( + repo_id=repo_id, + fps=fps, + root=save_root, + # NOTE(fengyuanh): We use "robot_type" instead of this field which requires a Robot object + robot=None, + robot_type=robot_type, + features=features, + modality_config=modality_config, + script_config=script_config, + # NOTE(fengyuanh): Always use videos for exporting + use_videos=True, + data_collection_info=data_collection_info, + ) + obj.tolerance_s = tolerance_s + obj.video_backend = ( + "pyav" # NOTE(fengyuanh): Only used in training, not relevant for exporting + ) + obj.vcodec = vcodec + obj.task = task + obj.image_writer = None + + obj.episode_buffer = obj.create_episode_buffer() + + obj.episodes = None + obj.hf_dataset = obj.create_hf_dataset() + obj.image_transforms = None + obj.delta_timestamps = None + obj.delta_indices = None + obj.episode_data_index = None + obj.upload_bucket_path = upload_bucket_path + obj.video_writers = obj.create_video_writer() + return obj + + def create_video_writer(self) -> dict[str, VideoWriter]: + video_writers = {} + for key in self.meta.video_keys: + video_writers[key] = VideoWriter( + self.root + / 
self.meta.get_video_file_path(self.episode_buffer["episode_index"], key), + self.meta.shapes[key][1], + self.meta.shapes[key][0], + self.fps, + self.vcodec, + ) + return video_writers + + # @note (k2): This function is copied from LeRobotDataset.add_frame. + # This is done because we want to bypass lerobot's + # image_writer and use our own VideoWriter class. + def add_frame(self, frame: dict) -> None: + """ + This function only adds the frame to the episode_buffer. Videos are handled by the video_writer, + which uses a stream writer to write to disk. + """ + frame = copy.deepcopy(frame) + frame["task"] = frame.get("task", self.task) + + # Convert torch to numpy if needed + for name in frame: + if isinstance(frame[name], torch.Tensor): + frame[name] = frame[name].numpy() + + validate_frame(frame, self.features) + + if self.episode_buffer is None: + self.episode_buffer = self.create_episode_buffer() + + # Automatically add frame_index and timestamp to episode buffer + frame_index = self.episode_buffer["size"] + timestamp = frame.pop("timestamp") if "timestamp" in frame else frame_index / self.fps + self.episode_buffer["frame_index"].append(frame_index) + self.episode_buffer["timestamp"].append(timestamp) + + # Add frame features to episode_buffer + for key in frame: + if key == "task": + # Note: we associate the task in natural language to its task index during `save_episode` + self.episode_buffer["task"].append(frame["task"]) + continue + + if key not in self.features: + raise ValueError( + f"An element of the frame is not in the features. '{key}' not in '{self.features.keys()}'." 
+ ) + + if self.features[key]["dtype"] in ["image", "video"]: + img_path = self._get_image_file_path( + episode_index=self.episode_buffer["episode_index"], + image_key=key, + frame_index=frame_index, + ) + if frame_index == 0: + img_path.parent.mkdir(parents=True, exist_ok=True) + + # @note (k2): using our own VideoWriter class, bypassing the image_writer + self.video_writers[key].add_frame(frame[key]) + self.episode_buffer[key].append(str(img_path)) + else: + self.episode_buffer[key].append(frame[key]) + + self.episode_buffer["size"] += 1 + + def stop_video_writers(self): + if not hasattr(self, "video_writers"): + raise RuntimeError( + "Can't stop video writers because they haven't been initialized. Call create() first." + ) + for key in self.video_writers: + self.video_writers[key].stop() + + def skip_and_start_new_episode( + self, + ) -> None: + """ + Skip the current episode and start a new one. + """ + self.stop_video_writers() + self.episode_buffer = self.create_episode_buffer() + self.video_writers = self.create_video_writer() + + # @note (k2): Code copied from LeRobotDataset.save_episode + # We override this function because we want to bypass lerobot's `compute_episode_stats` on video features + # since `compute_episode_stats` only works when images are written to disk. 
+ def save_episode(self, episode_data: dict | None = None) -> None: + if not episode_data: + episode_buffer = self.episode_buffer + + validate_episode_buffer(episode_buffer, self.meta.total_episodes, self.features) + + # size and task are special cases that won't be added to hf_dataset + episode_length = episode_buffer.pop("size") + tasks = episode_buffer.pop("task") + episode_tasks = list(set(tasks)) + episode_index = episode_buffer["episode_index"] + + episode_buffer["index"] = np.arange( + self.meta.total_frames, self.meta.total_frames + episode_length + ) + episode_buffer["episode_index"] = np.full((episode_length,), episode_index) + + # Add new tasks to the tasks dictionary + for task in episode_tasks: + task_index = self.meta.get_task_index(task) + if task_index is None: + self.meta.add_task(task) + + # Given tasks in natural language, find their corresponding task indices + episode_buffer["task_index"] = np.array([self.meta.get_task_index(task) for task in tasks]) + + for key, ft in self.features.items(): + # index, episode_index, task_index are already processed above, and image and video + # are processed separately by storing image path and frame info as meta data + if key in ["index", "episode_index", "task_index"] or ft["dtype"] in ["image", "video"]: + continue + episode_buffer[key] = np.stack(episode_buffer[key]) + + self._wait_image_writer() + self._save_episode_table(episode_buffer, episode_index) + + # @note (k2): computing only non-video features stats + non_video_features = {k: v for k, v in self.features.items() if v["dtype"] not in ["video"]} + non_vid_ep_buffer = { + k: v for k, v in episode_buffer.items() if k in non_video_features.keys() + } + ep_stats = compute_episode_stats(non_vid_ep_buffer, non_video_features) + + if len(self.meta.video_keys) > 0: + video_paths = self.encode_episode_videos(episode_index) + for key in self.meta.video_keys: + episode_buffer[key] = video_paths[key] + + # `meta.save_episode` be executed after encoding the 
videos + self.meta.save_episode(episode_index, episode_length, episode_tasks, ep_stats) + + ep_data_index = get_episode_data_index(self.meta.episodes, [episode_index]) + ep_data_index_np = {k: t.numpy() for k, t in ep_data_index.items()} + check_timestamps_sync( + episode_buffer["timestamp"], + episode_buffer["episode_index"], + ep_data_index_np, + self.fps, + self.tolerance_s, + ) + + video_files = list(self.root.rglob("*.mp4")) + assert len(video_files) == self.num_episodes * len(self.meta.video_keys) + + parquet_files = list(self.root.rglob("*.parquet")) + assert len(parquet_files) == self.num_episodes + + # delete images + img_dir = self.root / "images" + if img_dir.is_dir(): + shutil.rmtree(self.root / "images") + + if not episode_data: # Reset the buffer and create new video writers + self.episode_buffer = self.create_episode_buffer() + self.video_writers = self.create_video_writer() + + # check if all video and parquet files exist + for key in self.meta.video_keys: + video_path = os.path.join(self.root, self.meta.get_video_file_path(episode_index, key)) + if not os.path.exists(video_path): + raise FileNotFoundError( + f"Video path: {video_path} does not exist for episode {episode_index}" + ) + + parquet_path = os.path.join(self.root, self.meta.get_data_file_path(episode_index)) + if not os.path.exists(parquet_path): + raise FileNotFoundError( + f"Parquet path: {parquet_path} does not exist for episode {episode_index}" + ) + + # @note (k2): Overriding LeRobotDataset.encode_episode_videos to use our own VideoWriter class + def encode_episode_videos(self, episode_index: int) -> dict: + video_paths = {} + for key in self.meta.video_keys: + video_paths[key] = self.video_writers[key].stop() + return video_paths + + def save_episode_as_discarded(self) -> None: + """ + Flag ongoing episode as discarded and save it to disk. Failed manipulations (grasp, manipulation) are + flagged as discarded. 
It will add the episode index to the discarded episode indices list in info.json. + """ + self.meta.info["discarded_episode_indices"] = self.meta.info.get( + "discarded_episode_indices", [] + ) + [self.episode_buffer["episode_index"]] + self.save_episode() + + +def hf_transform_to_torch_by_features( + features: datasets.Sequence, items_dict: dict[torch.Tensor | None] +): + """Get a transform function that convert items from Hugging Face dataset (pyarrow) + to torch tensors. Importantly, images are converted from PIL, which corresponds to + a channel last representation (h w c) of uint8 type, to a torch image representation + with channel first (c h w) of float32 type in range [0,1]. + """ + for key in items_dict: + first_item = items_dict[key][0] + if isinstance(first_item, PILImage.Image): + to_tensor = transforms.ToTensor() + items_dict[key] = [to_tensor(img) for img in items_dict[key]] + elif first_item is None: + pass + else: + if isinstance(features[key], datasets.Value): + dtype_str = features[key].dtype + elif isinstance(features[key], datasets.Sequence): + assert isinstance(features[key].feature, datasets.Value) + dtype_str = features[key].feature.dtype + else: + raise ValueError(f"Unsupported feature type for key '{key}': {features[key]}") + dtype_mapping = { + "float32": torch.float32, + "float64": torch.float64, + "int32": torch.int32, + "int64": torch.int64, + } + items_dict[key] = [ + torch.tensor(x, dtype=dtype_mapping[dtype_str]) for x in items_dict[key] + ] + return items_dict + + +# This is a subclass of LeRobotDataset that only fixes the data type when loading +# By default, LeRobotDataset will automatically convert float64 to float32 +class TypedLeRobotDataset(LeRobotDataset): + def __init__(self, load_video=True, *args, **kwargs): + super().__init__(*args, **kwargs) + if not load_video: + video_keys = [] + for key in self.meta.features.keys(): + if self.meta.features[key]["dtype"] == "video": + video_keys.append(key) + for key in video_keys: + 
self.meta.features.pop(key) + + def load_hf_dataset(self) -> datasets.Dataset: + """hf_dataset contains all the observations, states, actions, rewards, etc.""" + if self.episodes is None: + path = str(self.root / "data") + hf_dataset = load_dataset("parquet", data_dir=path, split="train") + else: + files = [ + str(self.root / self.meta.get_data_file_path(ep_idx)) for ep_idx in self.episodes + ] + hf_dataset = load_dataset("parquet", data_files=files, split="train") + + # TODO(aliberts): hf_dataset.set_format("torch") + hf_dataset.set_transform(partial(hf_transform_to_torch_by_features, hf_dataset.features)) + return hf_dataset diff --git a/decoupled_wbc/data/utils.py b/decoupled_wbc/data/utils.py new file mode 100644 index 0000000..04874bd --- /dev/null +++ b/decoupled_wbc/data/utils.py @@ -0,0 +1,156 @@ +from decoupled_wbc.control.robot_model.robot_model import RobotModel +from decoupled_wbc.data.constants import RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH + + +def get_modality_config(robot_model: RobotModel, add_stereo_camera: bool = False) -> dict: + """ + Get the modality config for the robot model. 
+ """ + left_hand_indices = sorted(robot_model.get_joint_group_indices("left_hand")) + right_hand_indices = sorted(robot_model.get_joint_group_indices("right_hand")) + left_arm_indices = sorted(robot_model.get_joint_group_indices("left_arm")) + right_arm_indices = sorted(robot_model.get_joint_group_indices("right_arm")) + waist_indices = sorted(robot_model.get_joint_group_indices("waist")) + left_leg_indices = sorted(robot_model.get_joint_group_indices("left_leg")) + right_leg_indices = sorted(robot_model.get_joint_group_indices("right_leg")) + + modality_config = { + "state": { + "left_leg": {"start": left_leg_indices[0], "end": left_leg_indices[-1] + 1}, + "right_leg": {"start": right_leg_indices[0], "end": right_leg_indices[-1] + 1}, + "waist": {"start": waist_indices[0], "end": waist_indices[-1] + 1}, + "left_arm": {"start": left_arm_indices[0], "end": left_arm_indices[-1] + 1}, + "left_hand": {"start": left_hand_indices[0], "end": left_hand_indices[-1] + 1}, + "right_arm": {"start": right_arm_indices[0], "end": right_arm_indices[-1] + 1}, + "right_hand": {"start": right_hand_indices[0], "end": right_hand_indices[-1] + 1}, + "left_wrist_pos": {"start": 0, "end": 3, "original_key": "observation.eef_state"}, + "left_wrist_abs_quat": { + "start": 3, + "end": 7, + "original_key": "observation.eef_state", + "rotation_type": "quaternion", + }, + "right_wrist_pos": {"start": 7, "end": 10, "original_key": "observation.eef_state"}, + "right_wrist_abs_quat": { + "start": 10, + "end": 14, + "original_key": "observation.eef_state", + "rotation_type": "quaternion", + }, + }, + "action": { + "left_leg": {"start": left_leg_indices[0], "end": left_leg_indices[-1] + 1}, + "right_leg": {"start": right_leg_indices[0], "end": right_leg_indices[-1] + 1}, + "waist": {"start": waist_indices[0], "end": waist_indices[-1] + 1}, + "left_arm": {"start": left_arm_indices[0], "end": left_arm_indices[-1] + 1}, + "left_hand": {"start": left_hand_indices[0], "end": left_hand_indices[-1] + 1}, 
+ "right_arm": {"start": right_arm_indices[0], "end": right_arm_indices[-1] + 1}, + "right_hand": {"start": right_hand_indices[0], "end": right_hand_indices[-1] + 1}, + "left_wrist_pos": {"start": 0, "end": 3, "original_key": "action.eef"}, + "left_wrist_abs_quat": { + "start": 3, + "end": 7, + "original_key": "action.eef", + "rotation_type": "quaternion", + }, + "right_wrist_pos": {"start": 7, "end": 10, "original_key": "action.eef"}, + "right_wrist_abs_quat": { + "start": 10, + "end": 14, + "original_key": "action.eef", + "rotation_type": "quaternion", + }, + "base_height_command": { + "start": 0, + "end": 1, + "original_key": "teleop.base_height_command", + }, + "navigate_command": {"start": 0, "end": 3, "original_key": "teleop.navigate_command"}, + }, + "video": {"ego_view": {"original_key": "observation.images.ego_view"}}, + "annotation": {"human.task_description": {"original_key": "task_index"}}, + } + if add_stereo_camera: + modality_config["video"].update( + { + "ego_view_left_mono": {"original_key": "observation.images.ego_view_left_mono"}, + "ego_view_right_mono": {"original_key": "observation.images.ego_view_right_mono"}, + } + ) + + return modality_config + + +def get_dataset_features(robot_model: RobotModel, add_stereo_camera: bool = False) -> dict: + """ + Get the dataset features for the robot model. 
+ """ + dataset_features = { + "observation.images.ego_view": { + "dtype": "video", + "shape": [RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH, 3], + "names": ["height", "width", "channel"], + }, + "observation.state": { + "dtype": "float64", + "shape": (robot_model.num_joints,), + "names": robot_model.joint_names, + }, + "observation.eef_state": { + "dtype": "float64", + "shape": (14,), + "names": [ + "left_wrist_pos", + "left_wrist_abs_quat", + "right_wrist_pos", + "right_wrist_abs_quat", + ], + }, + "action": { + "dtype": "float64", + "shape": (robot_model.num_joints,), + "names": robot_model.joint_names, + }, + "action.eef": { + "dtype": "float64", + "shape": (14,), + "names": [ + "left_wrist_pos", + "left_wrist_abs_quat", + "right_wrist_pos", + "right_wrist_abs_quat", + ], + }, + "observation.img_state_delta": { + "dtype": "float32", + "shape": (1,), + "names": "img_state_delta", + }, + "teleop.navigate_command": { + "dtype": "float64", + "shape": (3,), + "names": ["lin_vel_x", "lin_vel_y", "ang_vel_z"], + }, + "teleop.base_height_command": { + "dtype": "float64", + "shape": (1,), + "names": "base_height_command", + }, + } + if add_stereo_camera: + dataset_features.update( + { + "observation.images.ego_view_left_mono": { + "dtype": "video", + "shape": [RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH, 3], + "names": ["height", "width", "channel"], + }, + "observation.images.ego_view_right_mono": { + "dtype": "video", + "shape": [RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH, 3], + "names": ["height", "width", "channel"], + }, + } + ) + + return dataset_features diff --git a/gr00t_wbc/data/video_writer.py b/decoupled_wbc/data/video_writer.py similarity index 100% rename from gr00t_wbc/data/video_writer.py rename to decoupled_wbc/data/video_writer.py diff --git a/decoupled_wbc/data/viz/rerun_viz.py b/decoupled_wbc/data/viz/rerun_viz.py new file mode 100644 index 0000000..c649640 --- /dev/null +++ b/decoupled_wbc/data/viz/rerun_viz.py @@ -0,0 +1,213 @@ +#!/usr/bin/env 
python3 +"""Rerun visualization utilities for plotting data and images.""" + +from __future__ import annotations + +import argparse +import time +from typing import Dict, List, Optional + +import cv2 +import numpy as np +import rerun as rr # pip install rerun-sdk +import rerun.blueprint as rrb + + +class RerunViz: + """Class for visualizing data using Rerun.""" + + def __init__( + self, + image_keys: List[str], + tensor_keys: List[str], + app_name: str = "rerun_visualization", + memory_limit: str = "1GB", + window_size: float = 5.0, + port: int = 9876, + in_docker: bool = False, + ): + """Initialize the RerunViz class. + Args: + app_name: Name of the Rerun application + memory_limit: Memory limit for Rerun + window_size: Size of the time window in seconds + image_keys: List of image keys to plot + tensor_keys: List of tensor keys to plot + in_docker: Whether running inside Docker container. If in docker, + forward data to outside of the container to be rendered. + Use `rerun --port 9876` to visualize. Expecting rerun-cli 0.22.1 outside of docker. + Tested with rerun-sdk 0.21.0 inside docker. 
+ """ + self.app_name = app_name + self.memory_limit = memory_limit + self.window_size = window_size + self.tensor_keys = tensor_keys + self.image_keys = image_keys + self.port = port + self.in_docker = in_docker + # Initialize Rerun + self._initialize_rerun() + + def _initialize_rerun(self): + """Initialize Rerun and set up the blueprint.""" + rr.init(self.app_name) + if not self.in_docker: + # support for web visualization + rr.spawn(memory_limit=self.memory_limit, port=self.port, connect=True) + else: + # forward data to outside of the docker container + rr.connect(f"127.0.0.1:{self.port}") + self._create_blueprint() + + def _create_blueprint(self): + # Create a grid of plots + contents = [] + + # Add time series plots + for tensor_key in self.tensor_keys: + contents.append( + rrb.TimeSeriesView( + origin=tensor_key, + time_ranges=[ + rrb.VisibleTimeRange( + "time", + start=rrb.TimeRangeBoundary.cursor_relative(seconds=-self.window_size), + end=rrb.TimeRangeBoundary.cursor_relative(), + ) + ], + ) + ) + + # Add image views + for image_key in self.image_keys: + contents.append(rrb.Spatial2DView(origin=image_key, name=image_key)) + + # Send the blueprint with collapsed panels to hide side/bottom bars + rr.send_blueprint(rrb.Blueprint(rrb.Grid(contents=contents), collapse_panels=True)) + + def set_rerun_keys(self, image_keys: List[str], tensor_keys: List[str]): + """Set the Rerun keys.""" + self.image_keys = image_keys + self.tensor_keys = tensor_keys + self._create_blueprint() + + def plot_images(self, images: Dict[str, np.ndarray], timestamp: Optional[float] = None): + """Plot image data. 
+ + Args: + images: Dictionary mapping image names to image data + timestamp: Timestamp for the data (if None, uses current time) + """ + if timestamp is None: + timestamp = time.time() + + rr.set_time_seconds("time", timestamp) + + for key, image in images.items(): + if image is None: + continue + + if "depth" in key: + # Color jet + depth_colormap = cv2.applyColorMap( + cv2.convertScaleAbs(image, alpha=0.03), cv2.COLORMAP_JET + ) + rr.log(f"{key}", rr.Image(depth_colormap)) + else: + # Convert to RGB + # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + rr.log(f"{key}", rr.Image(image)) + + def plot_tensors( + self, data: Optional[Dict[str, np.ndarray]] = None, timestamp: Optional[float] = None + ): + """Plot tensor data. + + Args: + data: Dictionary mapping keys to tensor values + timestamp: Timestamp for the data (if None, uses current time) + """ + if timestamp is None: + timestamp = time.time() + + rr.set_time_seconds("time", timestamp) + + # If no data provided, use random walk generators + for tensor_key in self.tensor_keys: + for i in range(data[tensor_key].shape[0]): + rr.log(f"{tensor_key}/{i}", rr.Scalar(data[tensor_key][i])) + + def close(self): + """Close the RerunViz instance.""" + rr.rerun_shutdown() + + +if __name__ == "__main__": + """Main function to demonstrate the RerunViz class.""" + parser = argparse.ArgumentParser(description="Plot dashboard stress test") + parser.add_argument( + "--freq", type=float, default=20, help="Frequency of logging (applies to all series)" + ) + parser.add_argument( + "--window-size", type=float, default=5.0, help="Size of the window in seconds" + ) + parser.add_argument("--duration", type=float, default=60, help="How long to log for in seconds") + parser.add_argument("--use-rs", action="store_true", help="Use RealSense sensor") + parser.add_argument("--use-zed", action="store_true", help="Use ZED sensor") + parser.add_argument("--in-docker", action="store_true", help="Running inside Docker container") + args = 
parser.parse_args() + + if args.use_rs: + image_keys = ["color_image", "depth_image"] + from decoupled_wbc.control.sensor.realsense import RealSenseClientSensor + + sensor = RealSenseClientSensor() + elif args.use_zed: + image_keys = ["left_image", "right_image"] + from decoupled_wbc.control.sensor.zed import ZEDClientSensor + + sensor = ZEDClientSensor() + else: + from decoupled_wbc.control.sensor.dummy import DummySensor + + sensor = DummySensor() + image_keys = ["color_image"] + + tensor_keys = ["left_arm_qpos", "left_hand_qpos", "right_arm_qpos", "right_hand_qpos"] + + # Initialize the RerunViz class + viz = RerunViz( + image_keys=image_keys, + tensor_keys=tensor_keys, + window_size=args.window_size, + in_docker=args.in_docker, + ) + + # Run the visualization loop + cur_time = time.time() + end_time = cur_time + args.duration + time_per_tick = 1.0 / args.freq + + while cur_time < end_time: + # Advance time and sleep if necessary + cur_time += time_per_tick + sleep_for = cur_time - time.time() + if sleep_for > 0: + time.sleep(sleep_for) + + if sleep_for < -0.1: + print(f"Warning: missed logging window by {-sleep_for:.2f} seconds") + + # Plot dummy tensor + dummy_tensor = np.random.randn(5) + dummy_tensor_dict = {key: dummy_tensor for key in tensor_keys} + + viz.plot_tensors(dummy_tensor_dict, cur_time) + + # Plot images if available + images = sensor.read() + if images is not None: + img_to_show = {key: images[key] for key in image_keys} + viz.plot_images(img_to_show, cur_time) + + rr.script_teardown(args) diff --git a/gr00t_wbc/dexmg/gr00trobocasa/requirements.txt b/decoupled_wbc/dexmg/gr00trobocasa/requirements.txt similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/requirements.txt rename to decoupled_wbc/dexmg/gr00trobocasa/requirements.txt diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/__init__.py 
rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/base.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/base.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/base.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/base.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_basic.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_basic.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_basic.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_basic.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_dc.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_dc.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_dc.py rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_dc.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_pnp.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_pnp.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_pnp.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/environments/locomanipulation/locomanip_pnp.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/default_mink_ik_g1_gear_wbc.json b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/default_mink_ik_g1_gear_wbc.json similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/default_mink_ik_g1_gear_wbc.json rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/default_mink_ik_g1_gear_wbc.json diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/default_mink_ik_g1_gear_wbc_gc.json b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/default_mink_ik_g1_gear_wbc_gc.json similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/default_mink_ik_g1_gear_wbc_gc.json rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/examples/third_party_controller/default_mink_ik_g1_gear_wbc_gc.json diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/macros.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/macros.py 
similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/macros.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/macros.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/gear_factory.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/gear_factory.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/gear_factory.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/gear_factory.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_10.obj 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_10.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_10.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_10.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_11.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_11.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_11.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_11.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_3.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_3.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_3.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_4.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_4.obj similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_4.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_4.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_5.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_5.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_5.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_5.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_6.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_6.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_6.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_6.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_7.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_7.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_7.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_7.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_8.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_8.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_8.obj rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_8.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_9.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_9.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_9.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_9.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_poles.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_poles.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_poles.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_poles.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_walls.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_walls.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_walls.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/gear_factory_walls.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/paint_lines.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/paint_lines.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/paint_lines.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/meshes/paint_lines.obj diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Fence_Metal_AO.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Fence_Metal_AO.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Fence_Metal_AO.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Fence_Metal_AO.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Laser_Beam.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Laser_Beam.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Laser_Beam.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Laser_Beam.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Light_Curtain.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Light_Curtain.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Light_Curtain.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Light_Curtain.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Rack_3@Medium.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Rack_3@Medium.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Rack_3@Medium.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Rack_3@Medium.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Rack_4@Medium.png 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Rack_4@Medium.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Rack_4@Medium.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Rack_4@Medium.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_1@Medium.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_1@Medium.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_1@Medium.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_1@Medium.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_1_Fence@Medium.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_1_Fence@Medium.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_1_Fence@Medium.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_1_Fence@Medium.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_2@Medium.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_2@Medium.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_2@Medium.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_2@Medium.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_2_Fence@Medium.png 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_2_Fence@Medium.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_2_Fence@Medium.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Robots_2_Fence@Medium.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Steel_ASTM_AO.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Steel_ASTM_AO.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Steel_ASTM_AO.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/Steel_ASTM_AO.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/T_Floor_A1_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/T_Floor_A1_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/T_Floor_A1_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/T_Floor_A1_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/factory@1024.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/factory@1024.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/factory@1024.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/factory@1024.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/mtl-base_color@Small.png 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/mtl-base_color@Small.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/mtl-base_color@Small.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_factory/textures/mtl-base_color@Small.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/gear_lab.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/gear_lab.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/gear_lab.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/gear_lab.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/textures/floor.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/textures/floor.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/textures/floor.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/textures/floor.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/textures/wall.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/textures/wall.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/textures/wall.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/gear_lab/textures/wall.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/ground_arena.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/ground_arena.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/ground_arena.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/arenas/ground_arena.xml diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_3.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_3.obj similarity 
index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_3.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_4.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_4.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_4.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/collision/tmpm_h04v7s_collision_4.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/image0.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/image0.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/image0.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/image0.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/material.mtl 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/model_normalized_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/model_normalized_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/model_normalized_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/apple_0/visual/model_normalized_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/meshes/cardbox_a1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/meshes/cardbox_a1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/meshes/cardbox_a1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/meshes/cardbox_a1.obj diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/textures/T_Cardbox_A1_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/textures/T_Cardbox_A1_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/textures/T_Cardbox_A1_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_a1/textures/T_Cardbox_A1_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/meshes/cardbox_b1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/meshes/cardbox_b1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/meshes/cardbox_b1.obj rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/meshes/cardbox_b1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/textures/T_Cardbox_B3_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/textures/T_Cardbox_B3_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/textures/T_Cardbox_B3_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_b1/textures/T_Cardbox_B3_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/meshes/cardbox_c1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/meshes/cardbox_c1.obj similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/meshes/cardbox_c1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/meshes/cardbox_c1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/textures/T_Cardbox_C2_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/textures/T_Cardbox_C2_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/textures/T_Cardbox_C2_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_c1/textures/T_Cardbox_C2_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/meshes/cardbox_d1.obj 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/meshes/cardbox_d1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/meshes/cardbox_d1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/meshes/cardbox_d1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/textures/T_Cardbox_D2_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/textures/T_Cardbox_D2_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/textures/T_Cardbox_D2_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/cardbox_d1/textures/T_Cardbox_D2_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/material.mtl diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_3.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_3.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_3.obj rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_4.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_4.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_4.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_4.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_5.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_5.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_5.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_5.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_6.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_6.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_6.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_6.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_0.obj similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_10.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_10.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_10.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_10.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_11.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_11.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_11.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_11.obj diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_3.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_3.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_3.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_4.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_4.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_4.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_4.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_5.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_5.obj similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_5.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_5.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_6.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_6.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_6.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_6.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_7.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_7.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_7.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_7.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_8.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_8.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_8.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_8.obj diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_9.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_9.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_9.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/meshes/clock_0_0_collision_0_9.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/chasovaya_baseColor.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/chasovaya_baseColor.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/chasovaya_baseColor.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/chasovaya_baseColor.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/ciferblat_baseColor.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/ciferblat_baseColor.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/ciferblat_baseColor.png rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/ciferblat_baseColor.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/hrom_baseColor.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/hrom_baseColor.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/hrom_baseColor.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/hrom_baseColor.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/korpus_baseColor.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/korpus_baseColor.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/korpus_baseColor.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/korpus_baseColor.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/minutnaya_baseColor.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/minutnaya_baseColor.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/minutnaya_baseColor.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/clock_0/textures/minutnaya_baseColor.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/material.mtl 
similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_2.obj diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_3.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_3.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_3.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/meshes/control_box_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/textures/Switch_Box_Rack_AO.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/textures/Switch_Box_Rack_AO.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/textures/Switch_Box_Rack_AO.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_box/textures/Switch_Box_Rack_AO.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/material.mtl rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/ControlPanel_AO.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/ControlPanel_AO.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/ControlPanel_AO.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/ControlPanel_AO.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_1.obj rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_4.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_4.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_4.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_4.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_body.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_body.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_body.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_body.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_body.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_body.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_body.obj rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_body.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_button.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_button.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_button.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_button.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_1.obj similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_collision_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_lever.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_lever.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_lever.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/meshes/control_conveyorbelt_a08_lever.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/model.xml similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/textures/ControlPanel_AO.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/textures/ControlPanel_AO.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/textures/ControlPanel_AO.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/control_conveyorbelt_a08/textures/ControlPanel_AO.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_0.obj diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_border_l.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_border_l.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_border_l.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_border_l.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_border_r.obj 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_border_r.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_border_r.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_border_r.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_platform.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_platform.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_platform.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/meshes/dock_board_a12_convex_platform.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/T_Aluminium_Brushed_A1_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/T_Aluminium_Brushed_A1_Albedo.png similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/T_Aluminium_Brushed_A1_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/T_Aluminium_Brushed_A1_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/T_Plastic_Orange_B_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/T_Plastic_Orange_B_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/T_Plastic_Orange_B_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/T_Plastic_Orange_B_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/t_metal_circles_grid_grey_a_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/t_metal_circles_grid_grey_a_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/t_metal_circles_grid_grey_a_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/dock_board_a12/textures/t_metal_circles_grid_grey_a_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_0.obj rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_3.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_3.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_3.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/meshes/ergo_table_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/model.xml 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/textures/Rubber_Smooth_BaseColor.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/textures/Rubber_Smooth_BaseColor.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/textures/Rubber_Smooth_BaseColor.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/factory_ergo_table/textures/Rubber_Smooth_BaseColor.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/meshes/jug_a01.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/meshes/jug_a01.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/meshes/jug_a01.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/meshes/jug_a01.obj diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/textures/Jug_A.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/textures/Jug_A.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/textures/Jug_A.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/jug_a01/textures/Jug_A.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf_board/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf_board/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf_board/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf_board/model.xml diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf_cabinet/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf_cabinet/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf_cabinet/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_shelf_cabinet/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/meshes/lab_table.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/meshes/lab_table.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/meshes/lab_table.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/meshes/lab_table.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/model.xml diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/textures/Image_0.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/textures/Image_0.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/textures/Image_0.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/lab_table/textures/Image_0.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/meshes/longbox_a08_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/meshes/longbox_a08_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/meshes/longbox_a08_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/meshes/longbox_a08_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/meshes/longbox_a08_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/meshes/longbox_a08_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/meshes/longbox_a08_1.obj rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/meshes/longbox_a08_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/textures/T_LongBox_A01_Tile_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/textures/T_LongBox_A01_Tile_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/textures/T_LongBox_A01_Tile_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/textures/T_LongBox_A01_Tile_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/textures/T_LongBox_A01_Trim_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/textures/T_LongBox_A01_Trim_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/textures/T_LongBox_A01_Trim_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a08/textures/T_LongBox_A01_Trim_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/material.mtl 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/meshes/longbox_a09_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/meshes/longbox_a09_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/meshes/longbox_a09_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/meshes/longbox_a09_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/meshes/longbox_a09_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/meshes/longbox_a09_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/meshes/longbox_a09_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/meshes/longbox_a09_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/model.xml diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/textures/T_LongBox_A01_Tile_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/textures/T_LongBox_A01_Tile_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/textures/T_LongBox_A01_Tile_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/textures/T_LongBox_A01_Tile_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/textures/T_LongBox_A01_Trim_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/textures/T_LongBox_A01_Trim_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/textures/T_LongBox_A01_Trim_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/longbox_a09/textures/T_LongBox_A01_Trim_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_0.obj 
similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_3.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_3.obj similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_3.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_4.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_4.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_4.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_4.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_disk.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_disk.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_disk.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_disk.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_hinge.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_hinge.obj similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_hinge.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_hinge.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_rubber.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_rubber.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_rubber.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/meshes/mobile_shelving_cart_wheel_rubber.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/textures/T_Aluminium_Brushed_A1_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/textures/T_Aluminium_Brushed_A1_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/textures/T_Aluminium_Brushed_A1_Albedo.png rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/textures/T_Aluminium_Brushed_A1_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/textures/T_Plastic_Blue_A_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/textures/T_Plastic_Blue_A_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/textures/T_Plastic_Blue_A_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/mobile_shelving_cart/textures/T_Plastic_Blue_A_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/meshes/pallet_b1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/meshes/pallet_b1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/meshes/pallet_b1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/meshes/pallet_b1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/model.xml 
similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/textures/T_Pallet_A2_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/textures/T_Pallet_A2_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/textures/T_Pallet_A2_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_b1/textures/T_Pallet_A2_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/meshes/pallet_c1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/meshes/pallet_c1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/meshes/pallet_c1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/meshes/pallet_c1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/model.xml 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/textures/T_Pallet_A3_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/textures/T_Pallet_A3_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/textures/T_Pallet_A3_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/pallet_c1/textures/T_Pallet_A3_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_1.obj rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_3.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_3.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_3.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/collision/tmpzdovka8d_collision_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/visual/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/visual/material.mtl similarity index 
100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/visual/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/visual/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/visual/model_normalized_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/visual/model_normalized_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/visual/model_normalized_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/plate_1/visual/model_normalized_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_0.obj 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_3.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_3.obj similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_3.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/meshes/rubix_cube_1_0_collision_0_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/textures/Material.002_baseColor.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/textures/Material.002_baseColor.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/textures/Material.002_baseColor.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/rubix_cube_1/textures/Material.002_baseColor.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/meshes/shelf_a12_0.obj 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/meshes/shelf_a12_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/meshes/shelf_a12_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/meshes/shelf_a12_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/meshes/shelf_a12_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/meshes/shelf_a12_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/meshes/shelf_a12_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/meshes/shelf_a12_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/textures/Shelf_A12_Poles.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/textures/Shelf_A12_Poles.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/textures/Shelf_A12_Poles.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/textures/Shelf_A12_Poles.png diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/textures/Shelf_A12_Shelves.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/textures/Shelf_A12_Shelves.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/textures/Shelf_A12_Shelves.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/shelf_a12/textures/Shelf_A12_Shelves.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/target_zone/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/target_zone/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/target_zone/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/target_zone/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/target_zone/textures/Target_Zone.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/target_zone/textures/Target_Zone.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/target_zone/textures/Target_Zone.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/target_zone/textures/Target_Zone.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/material.mtl rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/meshes/tote_f01_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/meshes/tote_f01_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/meshes/tote_f01_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/meshes/tote_f01_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/meshes/tote_f01_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/meshes/tote_f01_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/meshes/tote_f01_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/meshes/tote_f01_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f01/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/material.mtl similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/material.mtl rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/meshes/tote_f02_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/meshes/tote_f02_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/meshes/tote_f02_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/meshes/tote_f02_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/meshes/tote_f02_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/meshes/tote_f02_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/meshes/tote_f02_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/meshes/tote_f02_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/model.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/tote_f02/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/material.mtl b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/material.mtl similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/material.mtl rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/material.mtl diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_disk.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_disk.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_disk.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_disk.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_hinge.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_hinge.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_hinge.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_hinge.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_rubber.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_rubber.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_rubber.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/wheel_rubber.obj diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_0.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_0.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_0.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_0.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_1.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_1.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_1.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_1.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_2.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_2.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_2.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_2.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_3.obj 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_3.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_3.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_3.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_4.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_4.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_4.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_4.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_5.obj b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_5.obj similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_5.obj rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/meshes/workshop_trolley_a01_5.obj diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/model.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/model.xml similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/model.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/model.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/textures/T_Plastic_Blue_A_Albedo.png b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/textures/T_Plastic_Blue_A_Albedo.png similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/textures/T_Plastic_Blue_A_Albedo.png rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/objects/omniverse/locomanip/workshop_trolley_a01/textures/T_Plastic_Blue_A_Albedo.png diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_29dof_rev_1_0.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_29dof_rev_1_0.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_29dof_rev_1_0.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_29dof_rev_1_0.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_threefinger_left_hand.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_threefinger_left_hand.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_threefinger_left_hand.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_threefinger_left_hand.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_threefinger_right_hand.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_threefinger_right_hand.xml similarity 
index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_threefinger_right_hand.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/g1_threefinger_right_hand.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/head_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/head_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/head_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/head_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_ankle_pitch_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_ankle_pitch_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_ankle_pitch_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_ankle_pitch_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_ankle_roll_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_ankle_roll_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_ankle_roll_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_ankle_roll_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_elbow_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_elbow_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_elbow_link.STL rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_elbow_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_elbow_link_merge.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_elbow_link_merge.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_elbow_link_merge.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_elbow_link_merge.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_index_0_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_index_0_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_index_0_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_index_0_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_index_1_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_index_1_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_index_1_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_index_1_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_middle_0_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_middle_0_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_middle_0_link.STL rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_middle_0_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_middle_1_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_middle_1_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_middle_1_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_middle_1_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_palm_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_palm_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_palm_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_palm_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_0_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_0_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_0_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_0_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_1_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_1_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_1_link.STL rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_1_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_2_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_2_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_2_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hand_thumb_2_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_pitch_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_pitch_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_pitch_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_pitch_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_roll_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_roll_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_roll_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_roll_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_yaw_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_yaw_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_yaw_link.STL rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_hip_yaw_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_knee_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_knee_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_knee_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_knee_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_rubber_hand.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_rubber_hand.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_rubber_hand.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_rubber_hand.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_pitch_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_pitch_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_pitch_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_pitch_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_roll_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_roll_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_roll_link.STL rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_roll_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_yaw_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_yaw_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_yaw_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_shoulder_yaw_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_pitch_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_pitch_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_pitch_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_pitch_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_roll_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_roll_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_roll_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_roll_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_roll_rubber_hand.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_roll_rubber_hand.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_roll_rubber_hand.STL rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_roll_rubber_hand.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_yaw_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_yaw_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_yaw_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/left_wrist_yaw_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/logo_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/logo_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/logo_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/logo_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/pelvis.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/pelvis.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/pelvis.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/pelvis.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/pelvis_contour_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/pelvis_contour_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/pelvis_contour_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/pelvis_contour_link.STL diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_ankle_pitch_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_ankle_pitch_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_ankle_pitch_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_ankle_pitch_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_ankle_roll_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_ankle_roll_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_ankle_roll_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_ankle_roll_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_elbow_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_elbow_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_elbow_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_elbow_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_elbow_link_merge.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_elbow_link_merge.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_elbow_link_merge.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_elbow_link_merge.STL diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_index_0_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_index_0_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_index_0_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_index_0_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_index_1_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_index_1_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_index_1_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_index_1_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_middle_0_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_middle_0_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_middle_0_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_middle_0_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_middle_1_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_middle_1_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_middle_1_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_middle_1_link.STL diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_palm_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_palm_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_palm_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_palm_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_0_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_0_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_0_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_0_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_1_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_1_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_1_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_1_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_2_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_2_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_2_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hand_thumb_2_link.STL diff --git 
a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_pitch_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_pitch_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_pitch_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_pitch_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_roll_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_roll_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_roll_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_roll_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_yaw_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_yaw_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_yaw_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_hip_yaw_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_knee_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_knee_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_knee_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_knee_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_rubber_hand.STL 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_rubber_hand.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_rubber_hand.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_rubber_hand.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_pitch_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_pitch_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_pitch_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_pitch_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_roll_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_roll_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_roll_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_roll_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_yaw_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_yaw_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_yaw_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_shoulder_yaw_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_pitch_link.STL 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_pitch_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_pitch_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_pitch_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_roll_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_roll_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_roll_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_roll_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_roll_rubber_hand.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_roll_rubber_hand.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_roll_rubber_hand.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_roll_rubber_hand.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_yaw_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_yaw_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_yaw_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/right_wrist_yaw_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_L_link.STL 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_L_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_L_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_L_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_L_rod_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_L_rod_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_L_rod_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_L_rod_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_R_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_R_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_R_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_R_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_R_rod_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_R_rod_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_R_rod_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_constraint_R_rod_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_link.STL 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_link_rev_1_0.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_link_rev_1_0.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_link_rev_1_0.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/torso_link_rev_1_0.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_constraint_L.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_constraint_L.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_constraint_L.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_constraint_L.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_constraint_R.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_constraint_R.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_constraint_R.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_constraint_R.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_roll_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_roll_link.STL similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_roll_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_roll_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_roll_link_rev_1_0.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_roll_link_rev_1_0.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_roll_link_rev_1_0.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_roll_link_rev_1_0.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_support_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_support_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_support_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_support_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_yaw_link.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_yaw_link.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_yaw_link.STL rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_yaw_link.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_yaw_link_rev_1_0.STL b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_yaw_link_rev_1_0.STL similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_yaw_link_rev_1_0.STL 
rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/assets/robots/unitree_g1/meshes/waist_yaw_link_rev_1_0.STL diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/grippers/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/grippers/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/grippers/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/grippers/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/grippers/g1_threefinger_hands.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/grippers/g1_threefinger_hands.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/grippers/g1_threefinger_hands.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/grippers/g1_threefinger_hands.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/bin.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/bin.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/bin.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/bin.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/box_pattern_object.py 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/box_pattern_object.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/box_pattern_object.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/box_pattern_object.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/lid.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/lid.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/lid.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/lid.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/needle.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/needle.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/needle.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/needle.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/pot_with_handles.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/pot_with_handles.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/pot_with_handles.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/pot_with_handles.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/ring_tripod.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/ring_tripod.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/ring_tripod.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite/ring_tripod.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/__init__.py 
similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/bin_with_handles.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/bin_with_handles.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/bin_with_handles.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/bin_with_handles.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/coffee_machine.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/coffee_machine.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/coffee_machine.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/coffee_machine.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/cup.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/cup.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/cup.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/cup.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/inverse_stacked_cylinder.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/inverse_stacked_cylinder.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/inverse_stacked_cylinder.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/inverse_stacked_cylinder.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/lightbulb.py 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/lightbulb.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/lightbulb.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/lightbulb.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/sliding_box.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/sliding_box.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/sliding_box.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/sliding_box.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/socket.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/socket.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/socket.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/socket.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/spray_bottle.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/spray_bottle.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/spray_bottle.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/spray_bottle.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stacked_box.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stacked_box.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stacked_box.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stacked_box.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stacked_cylinder.py 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stacked_cylinder.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stacked_cylinder.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stacked_cylinder.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stove_plug.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stove_plug.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stove_plug.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/composite_body/stove_plug.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/objects.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/objects.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/objects.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/objects.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/xml_objects.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/xml_objects.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/objects/xml_objects.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/objects/xml_objects.py diff --git a/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/robots/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/robots/__init__.py new file mode 100644 index 0000000..ecb1911 --- /dev/null +++ b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/robots/__init__.py @@ -0,0 +1,779 @@ +from numpy import dtype, ndarray + + +from typing import Any + + +import mujoco +import numpy as np + +from robosuite.controllers.parts.generic.joint_pos import JointPositionController +from robosuite.controllers.parts.gripper.simple_grip import SimpleGripController +from 
robosuite.controllers.composite.composite_controller import HybridMobileBase +from robosuite.models.grippers import ( + PandaGripper, + InspireLeftHand, + InspireRightHand, + FourierLeftHand, + FourierRightHand, +) + +from robocasa.models.grippers import G1ThreeFingerLeftHand, G1ThreeFingerRightHand + +from .manipulators import * + + +# This function returns in the order of the gripper XML's actuators. +def unformat_gripper_space(gripper, formatted_action): + formatted_action = np.array(formatted_action) + if isinstance(gripper, InspireLeftHand) or isinstance(gripper, InspireRightHand): + action = formatted_action[[0, 2, 4, 6, 8, 11]] + elif isinstance(gripper, FourierLeftHand) or isinstance(gripper, FourierRightHand): + action = formatted_action[[0, 1, 4, 6, 8, 10]] + elif isinstance(gripper, PandaGripper): + action = formatted_action + else: + raise TypeError + return action + + +def remove_mimic_joints(gripper, formatted_action) -> ndarray[Any, dtype[Any]]: + """_summary_ + + This is to remove the mimic joints from the actuator-ordered action + """ + formatted_action = np.array(formatted_action) + if isinstance(gripper, InspireLeftHand) or isinstance(gripper, InspireRightHand): + raise NotImplementedError + elif isinstance(gripper, FourierLeftHand) or isinstance(gripper, FourierRightHand): + action = formatted_action[[0, 2, 4, 6, 8, 10]] + elif isinstance(gripper, G1ThreeFingerLeftHand) or isinstance(gripper, G1ThreeFingerRightHand): + action = formatted_action + elif isinstance(gripper, PandaGripper): + action = formatted_action + return action + + +def _reconstruct_latest_action_gr1_impl(robot, verbose=False): + action_dict = {} + cc = robot.composite_controller + pf = robot.robot_model.naming_prefix + + for part_name, controller in cc.part_controllers.items(): + if isinstance(controller, JointPositionController): + assert controller.input_type == "absolute" + act = controller.goal_qpos + elif isinstance(controller, SimpleGripController): + assert not 
controller.use_action_scaling + act = unformat_gripper_space(cc.grippers[part_name], controller.goal_qvel) + else: + raise TypeError + action_dict[f"{pf}{part_name}"] = act + + if verbose: + print("Actions:", [(k, len(action_dict[k])) for k in action_dict]) + + return action_dict + + +def _reconstruct_latest_action_panda_impl(robot, action, verbose=False): + action_dict = {} + cc = robot.composite_controller + pf = robot.robot_model.naming_prefix + + for part_name, controller in cc.part_controllers.items(): + start_idx, end_idx = cc._action_split_indexes[part_name] + act = action[start_idx:end_idx] + # split action deeper for OSC + action_dict[f"{pf}{part_name}"] = act + + if verbose: + print("Actions:", [(k, len(action_dict[k])) for k in action_dict]) + + return action_dict + + +# This function tries to get all joints value, but sometimes we want to keep original action +# The function must be called after robot.control() +def reconstruct_latest_actions(env, actions=None, verbose=False): + if actions is None: + actions = np.zeros(env.action_dim) + action_dict = {} + cutoff = 0 + for robot in env.robots: + cc = robot.composite_controller + pf = robot.robot_model.naming_prefix + robot_action = actions[cutoff : cutoff + robot.action_dim] + cutoff += robot.action_dim + if "GR1" in robot.name: + action_dict.update(_reconstruct_latest_action_gr1_impl(robot)) + elif "Panda" in robot.name: + action_dict.update(_reconstruct_latest_action_panda_impl(robot, robot_action)) + else: + raise ValueError(f"Unknown robot name: {robot.name}") + if isinstance(cc, HybridMobileBase): + action_dict[f"{pf}base_mode"] = robot_action[-1] + + if verbose: + print("Actions:", [(k, len(action_dict[k])) for k in action_dict]) + + return action_dict + + +def gather_robot_observations(env, verbose=False, format_gripper_space=True): + observations = {} + + for robot_id, robot in enumerate(env.robots): + sim = robot.sim + gripper_names = {robot.get_gripper_name(arm): robot.gripper[arm] for arm in 
robot.arms} + for part_name, indexes in robot._ref_joints_indexes_dict.items(): + qpos_values = [] + for joint_id in indexes: + qpos_addr = sim.model.jnt_qposadr[joint_id] + # https://mujoco.readthedocs.io/en/stable/APIreference/APItypes.html#mjtjoint + joint_type = sim.model.jnt_type[joint_id] + if joint_type == mujoco.mjtJoint.mjJNT_FREE: + qpos_size = 7 # Free joint has 7 DOFs + elif joint_type == mujoco.mjtJoint.mjJNT_BALL: + qpos_size = 4 # Ball joint has 4 DOFs (quaternion) + else: + qpos_size = 1 # Revolute or prismatic joint has 1 DOF + qpos_values = np.append( + qpos_values, sim.data.qpos[qpos_addr : qpos_addr + qpos_size] + ) + if part_name in gripper_names.keys(): + gripper = gripper_names[part_name] + # Reverse the order to match the real robot + # TODO: how to adapt to other grippers? + if format_gripper_space: + qpos_values = unformat_gripper_space(gripper, qpos_values)[::-1] + if len(qpos_values) > 0: + observations[f"robot{robot_id}_{part_name}"] = qpos_values + + if verbose: + print("States:", [(k, len(observations[k])) for k in observations]) + + return observations + + +from gymnasium import spaces + + +class RobotKeyConverter: + @classmethod + def get_camera_config(cls): + raise NotImplementedError + + @classmethod + def map_obs(cls, input_obs): + raise NotImplementedError + + @classmethod + def map_action(cls, input_action): + raise NotImplementedError + + @classmethod + def unmap_action(cls, input_action): + raise NotImplementedError + + @classmethod + def get_metadata(cls, name): + raise NotImplementedError + + @classmethod + def map_obs_in_eval(cls, input_obs): + output_obs = {} + mapped_obs = cls.map_obs(input_obs) + for k, v in mapped_obs.items(): + assert k.startswith("hand.") or k.startswith("body.") + output_obs["state." 
+ k[5:]] = v + return output_obs + + @classmethod + def get_missing_keys_in_dumping_dataset(cls): + return {} + + @classmethod + def convert_to_float64(cls, input): + for k, v in input.items(): + if isinstance(v, np.ndarray) and v.dtype == np.float32: + input[k] = v.astype(np.float64) + return input + + @classmethod + def deduce_observation_space(cls, env): + # Calling env._get_observations(force_update=True) will pollute the observables. + # This is safe because it is only called from the constructor and will be overwritten later in reset(). + obs = ( + env.viewer._get_observations(force_update=True) + if env.viewer_get_obs + else env._get_observations(force_update=True) + ) + obs.update(gather_robot_observations(env)) + obs = cls.map_obs(obs) + observation_space = spaces.Dict() + + for k, v in obs.items(): + if k.startswith("hand.") or k.startswith("body."): + observation_space["state." + k[5:]] = spaces.Box( + low=-1, high=1, shape=(len(v),), dtype=np.float32 + ) + else: + raise ValueError(f"Unknown key: {k}") + + return observation_space + + @classmethod + def deduce_action_space(cls, env): + action = cls.map_action(reconstruct_latest_actions(env)) + action_space = spaces.Dict() + for k, v in action.items(): + if isinstance(v, np.int64): + action_space["action." + k[5:]] = spaces.Discrete(2) + elif isinstance(v, np.ndarray): + action_space["action." 
+ k[5:]] = spaces.Box( + low=-1, high=1, shape=(len(v),), dtype=np.float32 + ) + else: + raise ValueError(f"Unknown type: {type(v)}") + return action_space + + +class GR1ArmsOnlyKeyConverter(RobotKeyConverter): + @classmethod + def get_camera_config(cls): + mapped_names = ["video.ego_view_pad_res256_freq20"] + camera_names = [ + "egoview", + ] + camera_widths, camera_heights = 1280, 800 + return mapped_names, camera_names, camera_widths, camera_heights + + @classmethod + def map_obs(cls, input_obs): + output_obs = type(input_obs)() + output_obs = { + "hand.right_hand": input_obs["robot0_right_gripper"], + "hand.left_hand": input_obs["robot0_left_gripper"], + "body.right_arm": input_obs["robot0_right"], + "body.left_arm": input_obs["robot0_left"], + } + return output_obs + + @classmethod + def map_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "hand.left_hand": input_action["robot0_left_gripper"], + "hand.right_hand": input_action["robot0_right_gripper"], + "body.left_arm": input_action["robot0_left"], + "body.right_arm": input_action["robot0_right"], + } + return output_action + + @classmethod + def unmap_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "robot0_left_gripper": input_action["action.left_hand"], + "robot0_right_gripper": input_action["action.right_hand"], + "robot0_left": input_action["action.left_arm"], + "robot0_right": input_action["action.right_arm"], + } + return output_action + + @classmethod + def get_missing_keys_in_dumping_dataset(cls): + return { + "body.waist": np.zeros(3, dtype=np.float64), + "body.neck": np.zeros(3, dtype=np.float64), + "body.right_leg": np.zeros(6, dtype=np.float64), + "body.left_leg": np.zeros(6, dtype=np.float64), + } + + @classmethod + def get_metadata(cls, name): + return { + "absolute": True, + "rotation_type": None, + } + + +class GR1ArmsAndWaistKeyConverter(RobotKeyConverter): + @classmethod + def get_camera_config(cls): + mapped_names 
= ["video.ego_view_pad_res256_freq20"] + camera_names = [ + "egoview", + ] + camera_widths, camera_heights = 1280, 800 + return mapped_names, camera_names, camera_widths, camera_heights + + @classmethod + def map_obs(cls, input_obs): + output_obs = type(input_obs)() + output_obs = { + "hand.right_hand": input_obs["robot0_right_gripper"], + "hand.left_hand": input_obs["robot0_left_gripper"], + "body.right_arm": input_obs["robot0_right"], + "body.left_arm": input_obs["robot0_left"], + "body.waist": input_obs["robot0_torso"], + } + return output_obs + + @classmethod + def map_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "hand.left_hand": input_action["robot0_left_gripper"], + "hand.right_hand": input_action["robot0_right_gripper"], + "body.left_arm": input_action["robot0_left"], + "body.right_arm": input_action["robot0_right"], + "body.waist": input_action["robot0_torso"], + } + return output_action + + @classmethod + def unmap_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "robot0_left_gripper": input_action["action.left_hand"], + "robot0_right_gripper": input_action["action.right_hand"], + "robot0_left": input_action["action.left_arm"], + "robot0_right": input_action["action.right_arm"], + "robot0_torso": input_action["action.waist"], + } + return output_action + + @classmethod + def get_missing_keys_in_dumping_dataset(cls): + return { + "body.neck": np.zeros(3, dtype=np.float64), + "body.right_leg": np.zeros(6, dtype=np.float64), + "body.left_leg": np.zeros(6, dtype=np.float64), + } + + @classmethod + def get_metadata(cls, name): + return { + "absolute": True, + "rotation_type": None, + } + + +class GR1FixedLowerBodyKeyConverter(RobotKeyConverter): + @classmethod + def get_camera_config(cls): + mapped_names = ["video.agentview_pad_res256_freq20"] + camera_names = [ + "agentview", + ] + camera_widths, camera_heights = 1280, 800 + return mapped_names, camera_names, camera_widths, 
camera_heights + + @classmethod + def map_obs(cls, input_obs): + output_obs = type(input_obs)() + output_obs = { + "hand.right_hand": input_obs["robot0_right_gripper"], + "hand.left_hand": input_obs["robot0_left_gripper"], + "body.right_arm": input_obs["robot0_right"], + "body.left_arm": input_obs["robot0_left"], + "body.waist": input_obs["robot0_torso"], + "body.neck": input_obs["robot0_head"], + } + return output_obs + + @classmethod + def map_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "hand.left_hand": input_action["robot0_left_gripper"], + "hand.right_hand": input_action["robot0_right_gripper"], + "body.left_arm": input_action["robot0_left"], + "body.right_arm": input_action["robot0_right"], + "body.waist": input_action["robot0_torso"], + "body.neck": input_action["robot0_head"], + } + return output_action + + @classmethod + def unmap_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "robot0_left_gripper": input_action["action.left_hand"], + "robot0_right_gripper": input_action["action.right_hand"], + "robot0_left": input_action["action.left_arm"], + "robot0_right": input_action["action.right_arm"], + "robot0_torso": input_action["action.waist"], + "robot0_head": input_action["action.neck"], + } + return output_action + + @classmethod + def get_missing_keys_in_dumping_dataset(cls): + return { + "body.right_leg": np.zeros(6, dtype=np.float64), + "body.left_leg": np.zeros(6, dtype=np.float64), + } + + @classmethod + def get_metadata(cls, name): + return { + "absolute": True, + "rotation_type": None, + } + + +class PandaOmronKeyConverter(RobotKeyConverter): + @classmethod + def get_camera_config(cls): + mapped_names = [ + "video.res256_image_side_0", + "video.res256_image_side_1", + "video.res256_image_wrist_0", + ] + camera_names = [ + "robot0_agentview_left", + "robot0_agentview_right", + "robot0_eye_in_hand", + ] + camera_widths, camera_heights = 512, 512 + return mapped_names, 
camera_names, camera_widths, camera_heights + + @classmethod + def map_obs(cls, input_obs): + output_obs = type(input_obs)() + output_obs = { + "hand.gripper_qpos": input_obs["robot0_gripper_qpos"], + "body.base_position": input_obs["robot0_base_pos"], + "body.base_rotation": input_obs["robot0_base_quat"], + "body.end_effector_position_relative": input_obs["robot0_base_to_eef_pos"], + "body.end_effector_rotation_relative": input_obs["robot0_base_to_eef_quat"], + } + return output_obs + + @classmethod + def map_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "hand.gripper_close": ( + np.int64(0) if input_action["robot0_right_gripper"] < 0 else np.int64(1) + ), + "body.end_effector_position": input_action["robot0_right"][..., 0:3], + "body.end_effector_rotation": input_action["robot0_right"][..., 3:6], + "body.base_motion": np.concatenate( + ( + input_action["robot0_base"], + input_action["robot0_torso"], + ), + axis=-1, + ), + "body.control_mode": ( + np.int64(0) if input_action["robot0_base_mode"] < 0 else np.int64(1) + ), + } + return output_action + + @classmethod + def unmap_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "robot0_right_gripper": (-1.0 if input_action["action.gripper_close"] < 0.5 else 1.0), + "robot0_right": np.concatenate( + ( + input_action["action.end_effector_position"], + input_action["action.end_effector_rotation"], + ), + axis=-1, + ), + "robot0_base": input_action["action.base_motion"][..., 0:3], + "robot0_torso": input_action["action.base_motion"][..., 3:4], + "robot0_base_mode": (-1.0 if input_action["action.control_mode"] < 0.5 else 1.0), + } + return output_action + + @classmethod + def get_metadata(cls, name): + # This function and import is only enabled in gr00t exporter + from decoupled_wbc.data.schema.raw.metadata import RotationType + + if name in [ + "body.base_position", + "body.end_effector_position_relative", + "body.end_effector_position", + ]: 
+ return { + "absolute": False, + "rotation_type": None, + } + elif name in ["body.base_rotation", "body.end_effector_rotation_relative"]: + return { + "absolute": False, + "rotation_type": RotationType.QUATERNION, + } + elif name in ["body.end_effector_rotation"]: + return { + "absolute": False, + "rotation_type": RotationType.AXIS_ANGLE, + } + else: + return { + "absolute": True, + "rotation_type": None, + } + + +class PandaPandaKeyConverter(RobotKeyConverter): + @classmethod + def get_camera_config(cls): + mapped_names = [ + "video.agentview_pad_res256_freq20", + "video.robot0_eye_in_hand_pad_res256_freq20", + "video.robot1_eye_in_hand_pad_res256_freq20", + ] + camera_names = [ + "agentview", + "robot0_eye_in_hand", + "robot1_eye_in_hand", + ] + camera_widths, camera_heights = 1280, 800 + return mapped_names, camera_names, camera_widths, camera_heights + + @classmethod + def map_obs(cls, input_obs): + output_obs = type(input_obs)() + output_obs = { + "hand.right_gripper_qpos": input_obs["robot0_right_gripper"], + "hand.left_gripper_qpos": input_obs["robot1_right_gripper"], + "body.right_arm_eef_pos": input_obs["robot0_eef_pos"], + "body.right_arm_eef_quat": input_obs["robot0_eef_quat"], + "body.right_arm_joint_pos": input_obs["robot0_joint_pos"], + "body.left_arm_eef_pos": input_obs["robot1_eef_pos"], + "body.left_arm_eef_quat": input_obs["robot1_eef_quat"], + "body.left_arm_joint_pos": input_obs["robot1_joint_pos"], + } + return output_obs + + @classmethod + def map_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "hand.right_gripper_close": ( + np.int64(0) if input_action["robot0_right_gripper"] < 0 else np.int64(1) + ), + "hand.left_gripper_close": ( + np.int64(0) if input_action["robot1_right_gripper"] < 0 else np.int64(1) + ), + "body.right_arm_eef_pos": input_action["robot0_right"][..., 0:3], + "body.right_arm_eef_rot": input_action["robot0_right"][..., 3:6], + "body.left_arm_eef_pos": 
input_action["robot1_right"][..., 0:3], + "body.left_arm_eef_rot": input_action["robot1_right"][..., 3:6], + } + return output_action + + @classmethod + def unmap_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "robot0_right_gripper": ( + -1.0 if input_action["action.right_gripper_close"] < 0.5 else 1.0 + ), + "robot1_right_gripper": ( + -1.0 if input_action["action.left_gripper_close"] < 0.5 else 1.0 + ), + "robot0_right": np.concatenate( + ( + input_action["action.right_arm_eef_pos"], + input_action["action.right_arm_eef_rot"], + ), + axis=-1, + ), + "robot1_right": np.concatenate( + ( + input_action["action.left_arm_eef_pos"], + input_action["action.left_arm_eef_rot"], + ), + axis=-1, + ), + } + return output_action + + @classmethod + def get_metadata(cls, name): + # This function and import is only enabled in gr00t exporter + from decoupled_wbc.data.schema.raw.metadata import RotationType + + if name in ["body.right_arm_eef_pos", "body.left_arm_eef_pos"]: + return { + "absolute": False, + "rotation_type": None, + } + elif name in ["body.right_arm_eef_quat", "body.right_arm_eef_quat"]: + return { + "absolute": False, + "rotation_type": RotationType.QUATERNION, + } + elif name in ["body.right_arm_eef_rot", "body.left_arm_eef_rot"]: + return { + "absolute": False, + "rotation_type": RotationType.AXIS_ANGLE, + } + else: + return { + "absolute": True, + "rotation_type": None, + } + + +class PandaDexRHPandaDexRHKeyConverter(RobotKeyConverter): + @classmethod + def get_camera_config(cls): + mapped_names = [ + "video.agentview_pad_res256_freq20", + "video.robot0_eye_in_hand_pad_res256_freq20", + "video.robot1_eye_in_hand_pad_res256_freq20", + ] + camera_names = [ + "agentview", + "robot0_eye_in_hand", + "robot1_eye_in_hand", + ] + camera_widths, camera_heights = 1280, 800 + return mapped_names, camera_names, camera_widths, camera_heights + + @classmethod + def map_obs(cls, input_obs): + output_obs = type(input_obs)() + output_obs 
= { + "hand.right_hand": input_obs["robot0_right_gripper"], + "hand.left_hand": input_obs["robot1_right_gripper"], + "body.right_arm_eef_pos": input_obs["robot0_eef_pos"], + "body.right_arm_eef_quat": input_obs["robot0_eef_quat"], + "body.right_arm_joint_pos": input_obs["robot0_joint_pos"], + "body.left_arm_eef_pos": input_obs["robot1_eef_pos"], + "body.left_arm_eef_quat": input_obs["robot1_eef_quat"], + "body.left_arm_joint_pos": input_obs["robot1_joint_pos"], + } + return output_obs + + @classmethod + def map_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "hand.right_hand": input_action["robot0_right_gripper"], + "hand.left_hand": input_action["robot1_right_gripper"], + "body.right_arm_eef_pos": input_action["robot0_right"][0:3], + "body.right_arm_eef_rot": input_action["robot0_right"][3:6], + "body.left_arm_eef_pos": input_action["robot1_right"][0:3], + "body.left_arm_eef_rot": input_action["robot1_right"][3:6], + } + return output_action + + @classmethod + def unmap_action(cls, input_action): + output_action = type(input_action)() + output_action = { + "robot0_right_gripper": input_action["action.right_hand"], + "robot1_right_gripper": input_action["action.left_hand"], + "robot0_right": np.concatenate( + ( + input_action["action.right_arm_eef_pos"], + input_action["action.right_arm_eef_rot"], + ), + axis=-1, + ), + "robot1_right": np.concatenate( + ( + input_action["action.left_arm_eef_pos"], + input_action["action.left_arm_eef_rot"], + ), + axis=-1, + ), + } + return output_action + + @classmethod + def get_metadata(cls, name): + # This function and import is only enabled in gr00t exporter + from decoupled_wbc.data.schema.raw.metadata import RotationType + + if name in ["body.right_arm_eef_pos", "body.left_arm_eef_pos"]: + return { + "absolute": False, + "rotation_type": None, + } + elif name in ["body.right_arm_eef_quat", "body.right_arm_eef_quat"]: + return { + "absolute": False, + "rotation_type": 
RotationType.QUATERNION, + } + elif name in ["body.right_arm_eef_rot", "body.left_arm_eef_rot"]: + return { + "absolute": False, + "rotation_type": RotationType.AXIS_ANGLE, + } + else: + return { + "absolute": True, + "rotation_type": None, + } + + +# The values are only used in gr00t dataset embodiment tag and env name +GR00T_ROBOCASA_ENVS_GR1_ARMS_ONLY = { + "GR1ArmsOnly": "gr1_arms_only_fourier_hands", + "GR1ArmsOnlyInspireHands": "gr1_arms_only_inspire_hands", + "GR1ArmsOnlyFourierHands": "gr1_arms_only_fourier_hands", +} +GR00T_ROBOCASA_ENVS_GR1_ARMS_AND_WAIST = { + "GR1ArmsAndWaistFourierHands": "gr1_arms_waist_fourier_hands", +} +GR00T_ROBOCASA_ENVS_GR1_FIXED_LOWER_BODY = { + "GR1FixedLowerBody": "gr1_fixed_lower_body_fourier_hands", + "GR1FixedLowerBodyInspireHands": "gr1_fixed_lower_body_inspire_hands", + "GR1FixedLowerBodyFourierHands": "gr1_fixed_lower_body_fourier_hands", +} +GR00T_ROBOCASA_ENVS_PANDA = { + "PandaMobile": "panda_mobile", + "PandaOmron": "panda_omron", +} +GR00T_ROBOCASA_ENVS_BIMANUAL_GRIPPER = { + "Panda_Panda": "bimanual_panda_parallel_gripper", +} +GR00T_ROBOCASA_ENVS_BIMANUAL_HAND = { + "PandaDexRH_PandaDexLH": "bimanual_panda_inspire_hand", +} +GR00T_ROBOCASA_ENVS_G1 = { + "G1": "g1_sim", + "G1ArmsOnly": "g1_sim", + "G1FixedLowerBody": "g1_sim", + "G1FixedBase": "g1_sim", +} +GR00T_ROBOCASA_ENVS_ROBOTS = { + **GR00T_ROBOCASA_ENVS_GR1_ARMS_ONLY, + **GR00T_ROBOCASA_ENVS_GR1_ARMS_AND_WAIST, + **GR00T_ROBOCASA_ENVS_GR1_FIXED_LOWER_BODY, + **GR00T_ROBOCASA_ENVS_PANDA, + **GR00T_ROBOCASA_ENVS_BIMANUAL_GRIPPER, + **GR00T_ROBOCASA_ENVS_BIMANUAL_HAND, + **GR00T_ROBOCASA_ENVS_G1, +} +GR00T_LOCOMANIP_ENVS_ROBOTS = {**GR00T_ROBOCASA_ENVS_G1} + + +def make_key_converter(robots_name): + if robots_name in GR00T_ROBOCASA_ENVS_GR1_ARMS_ONLY: + return GR1ArmsOnlyKeyConverter + elif robots_name in GR00T_ROBOCASA_ENVS_GR1_ARMS_AND_WAIST: + return GR1ArmsAndWaistKeyConverter + elif robots_name in GR00T_ROBOCASA_ENVS_GR1_FIXED_LOWER_BODY: + return 
GR1FixedLowerBodyKeyConverter + elif robots_name in GR00T_ROBOCASA_ENVS_PANDA: + return PandaOmronKeyConverter + elif robots_name in GR00T_ROBOCASA_ENVS_BIMANUAL_GRIPPER: + return PandaPandaKeyConverter + elif robots_name in GR00T_ROBOCASA_ENVS_BIMANUAL_HAND: + return PandaDexRHPandaDexRHKeyConverter + else: + raise ValueError(f"Unknown robot name: {robots_name}") diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/robots/manipulators/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/robots/manipulators/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/robots/manipulators/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/robots/manipulators/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/robots/manipulators/g1_robot.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/robots/manipulators/g1_robot.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/robots/manipulators/g1_robot.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/robots/manipulators/g1_robot.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/factory_arena.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/factory_arena.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/factory_arena.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/factory_arena.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/ground_arena.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/ground_arena.py similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/ground_arena.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/ground_arena.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/lab_arena.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/lab_arena.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/lab_arena.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/models/scenes/lab_arena.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/camera_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/camera_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/camera_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/camera_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/config_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/config_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/config_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/config_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/cotrain_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/cotrain_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/cotrain_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/cotrain_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/dataset_registry.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/dataset_registry.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/dataset_registry.py rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/dataset_registry.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/dexmg_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/dexmg_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/dexmg_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/dexmg_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/env_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/env_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/env_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/env_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/gym_utils/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/gym_utils/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/gym_utils/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/gym_utils/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/gym_utils/gymnasium_basic.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/gym_utils/gymnasium_basic.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/gym_utils/gymnasium_basic.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/gym_utils/gymnasium_basic.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/file_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/file_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/file_utils.py rename to 
decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/file_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/fixture_template.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/fixture_template.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/fixture_template.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/fixture_template.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/install_vhacd.sh b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/install_vhacd.sh similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/install_vhacd.sh rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/install_vhacd.sh diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/log_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/log_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/log_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/log_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mjcf_gen_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mjcf_gen_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mjcf_gen_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mjcf_gen_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mjcf_obj.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mjcf_obj.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mjcf_obj.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mjcf_obj.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mtl_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mtl_utils.py similarity 
index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mtl_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/mtl_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/object_play_env.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/object_play_env.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/object_play_env.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/object_play_env.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/object_template.xml b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/object_template.xml similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/object_template.xml rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/object_template.xml diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/parser_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/parser_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/parser_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/model_zoo/parser_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/object_utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/object_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/object_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/object_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/placement_samplers.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/placement_samplers.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/placement_samplers.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/placement_samplers.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/postprocess_xml_utils.py 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/postprocess_xml_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/postprocess_xml_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/postprocess_xml_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/configs.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/configs.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/configs.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/configs.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/scene.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/scene.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/scene.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/scene.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/success_criteria.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/success_criteria.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/success_criteria.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/scene/success_criteria.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/texture_swap.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/texture_swap.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/texture_swap.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/texture_swap.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/transform_utils.py 
b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/transform_utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/transform_utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/transform_utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/README.md b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/README.md similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/README.md rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/README.md diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/component.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/component.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/component.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/component.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/demo.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/demo.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/demo.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/demo.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/exporter.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/exporter.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/exporter.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/exporter.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/exporter_test.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/exporter_test.py similarity index 100% rename from 
gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/exporter_test.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/exporter_test.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/shapes.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/shapes.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/shapes.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/shapes.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/utils.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/utils.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/utils.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/usd/utils.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/visuals_utls.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/visuals_utls.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/utils/visuals_utls.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/utils/visuals_utls.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/wrappers/__init__.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/wrappers/__init__.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/wrappers/__init__.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/wrappers/__init__.py diff --git a/gr00t_wbc/dexmg/gr00trobocasa/robocasa/wrappers/ik_wrapper.py b/decoupled_wbc/dexmg/gr00trobocasa/robocasa/wrappers/ik_wrapper.py similarity index 100% rename from gr00t_wbc/dexmg/gr00trobocasa/robocasa/wrappers/ik_wrapper.py rename to decoupled_wbc/dexmg/gr00trobocasa/robocasa/wrappers/ik_wrapper.py diff --git a/decoupled_wbc/dexmg/gr00trobocasa/setup.py b/decoupled_wbc/dexmg/gr00trobocasa/setup.py new file mode 100644 index 0000000..ede7806 --- /dev/null +++ b/decoupled_wbc/dexmg/gr00trobocasa/setup.py @@ -0,0 +1,29 @@ +from setuptools import find_packages, setup + + +setup( + name="robocasa", + 
packages=[package for package in find_packages() if package.startswith("robocasa")], + install_requires=[ + "numpy==1.26.4", + "numba==0.61.0", + "scipy>=1.2.3", + "mujoco==3.2.6", + "pygame", + "Pillow", + "opencv-python", + "pyyaml", + "pynput", + "tqdm", + "termcolor", + "imageio", + "h5py", + "lxml", + "hidapi", + ], + eager_resources=["*"], + include_package_data=True, + python_requires=">=3", + description="Gr00t RoboCasa for loco-manipulation", + version="0.2.0", +) diff --git a/decoupled_wbc/docker/.bashrc b/decoupled_wbc/docker/.bashrc new file mode 100644 index 0000000..999e74e --- /dev/null +++ b/decoupled_wbc/docker/.bashrc @@ -0,0 +1,163 @@ +# ~/.bashrc: executed by bash(1) for non-login shells. +# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) +# for examples + +# If not running interactively, don't do anything +case $- in + *i*) ;; + *) return;; +esac + +# don't put duplicate lines or lines starting with space in the history. +# See bash(1) for more options +HISTCONTROL=ignoreboth + +# append to the history file, don't overwrite it +shopt -s histappend + +# for setting history length see HISTSIZE and HISTFILESIZE in bash(1) +HISTSIZE=1000 +HISTFILESIZE=2000 + +# check the window size after each command and, if necessary, +# update the values of LINES and COLUMNS. +shopt -s checkwinsize + +# If set, the pattern "**" used in a pathname expansion context will +# match all files and zero or more directories and subdirectories. 
+#shopt -s globstar + +# make less more friendly for non-text input files, see lesspipe(1) +[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" + +# set variable identifying the chroot you work in (used in the prompt below) +if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then + debian_chroot=$(cat /etc/debian_chroot) +fi + +# set a fancy prompt (non-color, unless we know we "want" color) +case "$TERM" in + xterm-color|*-256color) color_prompt=yes;; +esac + +# uncomment for a colored prompt, if the terminal has the capability; turned +# off by default to not distract the user: the focus in a terminal window +# should be on the output of commands, not on the prompt +force_color_prompt=yes + +if [ -n "$force_color_prompt" ]; then + if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then + # We have color support; assume it's compliant with Ecma-48 + # (ISO/IEC-6429). (Lack of such support is extremely rare, and such + # a case would tend to support setf rather than setaf.) 
+ color_prompt=yes + else + color_prompt= + fi +fi + +if [ "$color_prompt" = yes ]; then + PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' +else + PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' +fi +unset color_prompt force_color_prompt + +# If this is an xterm set the title to user@host:dir +case "$TERM" in +xterm*|rxvt*) + PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" + ;; +*) + ;; +esac + +# enable color support of ls and also add handy aliases +if [ -x /usr/bin/dircolors ]; then + test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" + alias ls='ls --color=auto' + #alias dir='dir --color=auto' + #alias vdir='vdir --color=auto' + + alias grep='grep --color=auto' + alias fgrep='fgrep --color=auto' + alias egrep='egrep --color=auto' +fi + +# colored GCC warnings and errors +export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' + +# Set terminal type for color support +export TERM=xterm-256color + +# some more ls aliases +alias ll='ls -alF' +alias la='ls -A' +alias l='ls -CF' + +# Add an "alert" alias for long running commands. Use like so: +# sleep 10; alert +alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' + +# Alias definitions. +# You may want to put all your additions into a separate file like +# ~/.bash_aliases, instead of adding them here directly. +# See /usr/share/doc/bash-doc/examples in the bash-doc package. + +if [ -f ~/.bash_aliases ]; then + . ~/.bash_aliases +fi + +# enable programmable completion features (you don't need to enable +# this, if it's already enabled in /etc/bash.bashrc and /etc/profile +# sources /etc/bash.bashrc). +if ! shopt -oq posix; then + if [ -f /usr/share/bash-completion/bash_completion ]; then + . 
/usr/share/bash-completion/bash_completion + elif [ -f /etc/bash_completion ]; then + . /etc/bash_completion + fi +fi + +# useful commands +bind '"\e[A": history-search-backward' +bind '"\e[B": history-search-forward' + +# Store the last 10 directories in a history file +CD_HISTFILE=~/.cd_history +CD_HISTSIZE=10 + +cd() { + local histfile="${CD_HISTFILE:-$HOME/.cd_history}" + local max="${CD_HISTSIZE:-10}" + + case "$1" in + --) [ -f "$histfile" ] && tac "$histfile" | nl -w2 -s' ' || echo "No directory history yet."; return ;; + -[0-9]*) + local idx=${1#-} + local dir=$(tac "$histfile" 2>/dev/null | sed -n "${idx}p") + [ -n "$dir" ] && builtin cd "$dir" || echo "Invalid selection: $1" + return ;; + esac + + builtin cd "$@" || return + + [[ $(tail -n1 "$histfile" 2>/dev/null) != "$PWD" ]] && echo "$PWD" >> "$histfile" + tail -n "$max" "$histfile" > "${histfile}.tmp" && mv "${histfile}.tmp" "$histfile" +} + +# Make decoupled_wbc importable +export PYTHONPATH="${DECOUPLED_WBC_DIR}:${PYTHONPATH}" + +# Manus to LD_LIBRARY_PATH +export LD_LIBRARY_PATH=$DECOUPLED_WBC_DIR/decoupled_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib:$LD_LIBRARY_PATH + +# CUDA support +export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:/usr/lib:/lib/x86_64-linux-gnu:/lib64:/lib:$LD_LIBRARY_PATH + +# decoupled_wbc aliases +alias dg="python $DECOUPLED_WBC_DIR/decoupled_wbc/scripts/deploy_g1.py" +alias rsl="python $DECOUPLED_WBC_DIR/decoupled_wbc/control/main/teleop/run_sim_loop.py" +alias rgcl="python $DECOUPLED_WBC_DIR/decoupled_wbc/control/main/teleop/run_g1_control_loop.py" +alias rtpl="python $DECOUPLED_WBC_DIR/decoupled_wbc/control/main/teleop/run_teleop_policy_loop.py" +alias tgcl="pytest $DECOUPLED_WBC_DIR/decoupled_wbc/tests/control/main/teleop/test_g1_control_loop.py -s" diff --git a/docker/.tmux.conf b/decoupled_wbc/docker/.tmux.conf similarity index 100% rename from docker/.tmux.conf rename to decoupled_wbc/docker/.tmux.conf diff --git a/docker/70-manus-hid.rules 
b/decoupled_wbc/docker/70-manus-hid.rules similarity index 100% rename from docker/70-manus-hid.rules rename to decoupled_wbc/docker/70-manus-hid.rules diff --git a/decoupled_wbc/docker/Dockerfile.deploy b/decoupled_wbc/docker/Dockerfile.deploy new file mode 100644 index 0000000..239127c --- /dev/null +++ b/decoupled_wbc/docker/Dockerfile.deploy @@ -0,0 +1,130 @@ +FROM nvgear/ros-2:latest + +# Accept build argument for username +ARG USERNAME +ARG USERID +ARG HOME_DIR +ARG WORKTREE_NAME + +# Create user with the same name as host +RUN if [ "$USERID" != "0" ]; then \ + useradd -m -u ${USERID} -s /bin/bash ${USERNAME} && \ + echo "${USERNAME} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \ + # Add user to video and render groups for GPU access + usermod -a -G video,render ${USERNAME} || true; \ + fi + +# Copy .bashrc with color settings before switching user +COPY --chown=${USERNAME}:${USERNAME} decoupled_wbc/docker/.bashrc ${HOME_DIR}/.bashrc + +# Install Manus udev rules +COPY --chown=${USERNAME}:${USERNAME} decoupled_wbc/docker/70-manus-hid.rules /etc/udev/rules.d/70-manus-hid.rules + +# Copy tmux configuration +COPY --chown=${USERNAME}:${USERNAME} decoupled_wbc/docker/.tmux.conf ${HOME_DIR}/.tmux.conf + +# Switch to user +USER ${USERNAME} + +# Install tmux plugin manager and uv in parallel +RUN git clone https://github.com/tmux-plugins/tpm ${HOME_DIR}/.tmux/plugins/tpm & \ + curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR=${HOME_DIR}/.cargo/bin sh & \ + wait + +# Install tmux plugins automatically +RUN ${HOME_DIR}/.tmux/plugins/tpm/bin/install_plugins || true + +# Add uv to PATH +ENV PATH="${HOME_DIR}/.cargo/bin:$PATH" +ENV UV_PYTHON=${HOME_DIR}/venv/bin/python + +# Create venv +RUN uv venv --python 3.10 ${HOME_DIR}/venv + +# Install hardware-specific packages (x86 only - not available on ARM64/Orin) +USER root +COPY --chown=${USERNAME}:${USERNAME} decoupled_wbc/control/teleop/device/pico/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb 
${HOME_DIR}/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb +COPY --chown=${USERNAME}:${USERNAME} decoupled_wbc/control/teleop/device/pico/roboticsservice_1.0.0.0_arm64.deb ${HOME_DIR}/roboticsservice_1.0.0.0_arm64.deb + +RUN if [ "$(dpkg --print-architecture)" = "amd64" ]; then \ + # Ultra Leap setup + wget -qO - https://repo.ultraleap.com/keys/apt/gpg | gpg --dearmor | tee /etc/apt/trusted.gpg.d/ultraleap.gpg && \ + echo 'deb [arch=amd64] https://repo.ultraleap.com/apt stable main' | tee /etc/apt/sources.list.d/ultraleap.list && \ + apt-get update && \ + echo "yes" | DEBIAN_FRONTEND=noninteractive apt-get install -y ultraleap-hand-tracking libhidapi-dev && \ + # Space Mouse udev rules + echo 'KERNEL=="hidraw*", SUBSYSTEM=="hidraw", MODE="0664", GROUP="plugdev"' > /etc/udev/rules.d/99-hidraw-permissions.rules && \ + usermod -aG plugdev ${USERNAME}; \ + # Pico setup + apt-get install -y xdg-utils && \ + dpkg -i ${HOME_DIR}/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb; \ + else \ + echo "Skipping x86-only hardware packages on $(dpkg --print-architecture)"; \ + fi + +USER ${USERNAME} +# Install hardware Python packages (x86 only) with caching +RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ + if [ "$(dpkg --print-architecture)" = "amd64" ]; then \ + # Ultra Leap Python bindings + git clone https://github.com/ultraleap/leapc-python-bindings ${HOME_DIR}/leapc-python-bindings && \ + cd ${HOME_DIR}/leapc-python-bindings && \ + UV_CONCURRENT_DOWNLOADS=8 uv pip install -r requirements.txt && \ + MAKEFLAGS="-j$(nproc)" ${HOME_DIR}/venv/bin/python -m build leapc-cffi && \ + uv pip install leapc-cffi/dist/leapc_cffi-0.0.1.tar.gz && \ + uv pip install -e leapc-python-api && \ + # Space Mouse Python package + uv pip install pyspacemouse && \ + # Pico Python bindings + git clone https://github.com/XR-Robotics/XRoboToolkit-PC-Service-Pybind.git ${HOME_DIR}/XRoboToolkit-PC-Service-Pybind && \ + cd 
${HOME_DIR}/XRoboToolkit-PC-Service-Pybind && \ + uv pip install setuptools pybind11 && \ + sed -i "s|pip install|uv pip install|g" setup_ubuntu.sh && \ + sed -i "s|pip uninstall|uv pip uninstall|g" setup_ubuntu.sh && \ + sed -i "s|python setup.py install|${HOME_DIR}/venv/bin/python setup.py install|g" setup_ubuntu.sh && \ + bash setup_ubuntu.sh; \ + fi + +# Install Python dependencies using uv with caching +RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ + UV_CONCURRENT_DOWNLOADS=8 uv pip install --upgrade pip ipython jupyter notebook debugpy + + +# Copy entire project to the workspace directory where it will be mounted at runtime +# NOTE: The build context must be the project root for this to work +# Use dynamic worktree name to match runtime mount path +COPY --chown=${USERNAME}:${USERNAME} . ${HOME_DIR}/Projects/${WORKTREE_NAME} + +# Install Python dependencies inside the venv with caching - split into separate commands +RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ + UV_CONCURRENT_DOWNLOADS=8 uv pip install \ + -e ${HOME_DIR}/Projects/${WORKTREE_NAME}/external_dependencies/unitree_sdk2_python + +# Unlike pip, uv fetches git LFS files by default. There's a bug in uv that causes LFS files +# to fail to download (https://github.com/astral-sh/uv/issues/3312). So we set +# GIT_LFS_SKIP_SMUDGE=1 to make git-lfs skip downloading LFS content during this install. 
+# Install project packages (decoupled_wbc + gear_sonic) with caching +RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ + GIT_LFS_SKIP_SMUDGE=1 UV_CONCURRENT_DOWNLOADS=8 uv pip install \ + -e "${HOME_DIR}/Projects/${WORKTREE_NAME}/decoupled_wbc[full,dev]" \ + -e "${HOME_DIR}/Projects/${WORKTREE_NAME}/gear_sonic[sim]" + +# Clone and install robosuite with specific branch +RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ + git clone https://github.com/xieleo5/robosuite.git ${HOME_DIR}/robosuite && \ + cd ${HOME_DIR}/robosuite && \ + git checkout leo/support_g1_locomanip && \ + UV_CONCURRENT_DOWNLOADS=8 uv pip install -e . + +# Install gr00trobocasa +RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ + UV_CONCURRENT_DOWNLOADS=8 uv pip install -e ${HOME_DIR}/Projects/${WORKTREE_NAME}/decoupled_wbc/dexmg/gr00trobocasa + +# Configure bash environment with virtual environment and ROS2 setup +RUN echo "source ${HOME_DIR}/venv/bin/activate" >> ${HOME_DIR}/.bashrc && \ + echo "source /opt/ros/humble/setup.bash" >> ${HOME_DIR}/.bashrc && \ + echo "export ROS_LOCALHOST_ONLY=1" >> ${HOME_DIR}/.bashrc && \ + echo "export PYTHONPATH=${HOME_DIR}/Projects/${WORKTREE_NAME}:\${PYTHONPATH}" >> ${HOME_DIR}/.bashrc + +# Default command (can be overridden at runtime) +CMD ["/bin/bash"] diff --git a/docker/Dockerfile.deploy.base b/decoupled_wbc/docker/Dockerfile.deploy.base similarity index 100% rename from docker/Dockerfile.deploy.base rename to decoupled_wbc/docker/Dockerfile.deploy.base diff --git a/decoupled_wbc/docker/build_deploy_base.sh b/decoupled_wbc/docker/build_deploy_base.sh new file mode 100644 index 0000000..9d4a5d4 --- /dev/null +++ b/decoupled_wbc/docker/build_deploy_base.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# Multi-architecture Docker build script +# Supports linux/amd64 + +set -e + +# Configuration +IMAGE_NAME="nvgear/ros-2" +TAG="${1:-latest}" +SCRIPT_DIR="$( cd "$( dirname 
"${BASH_SOURCE[0]}" )" && pwd )" +DOCKERFILE="$SCRIPT_DIR/Dockerfile.deploy.base" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}Building multi-architecture Docker image: ${IMAGE_NAME}:${TAG}${NC}" + +# Ensure we're using the multiarch builder +echo -e "${YELLOW}Setting up multiarch builder...${NC}" +sudo docker buildx use multiarch-builder 2>/dev/null || { + echo -e "${YELLOW}Creating multiarch builder...${NC}" + sudo docker buildx create --name multiarch-builder --use --bootstrap +} + +# Show supported platforms +echo -e "${YELLOW}Supported platforms:${NC}" +sudo docker buildx inspect --bootstrap | grep Platforms + +# Build for multiple architectures +echo -e "${GREEN}Starting multi-arch build...${NC}" +sudo docker buildx build \ + --platform linux/amd64 \ + --file "${DOCKERFILE}" \ + --tag "${IMAGE_NAME}:${TAG}" \ + --push \ + . + +# Alternative: Build and load locally (only works for single platform) +# docker buildx build \ +# --platform linux/amd64 \ +# --file "${DOCKERFILE}" \ +# --tag "${IMAGE_NAME}:${TAG}" \ +# --load \ +# . + +echo -e "${GREEN}Multi-arch build completed successfully!${NC}" +echo -e "${GREEN}Image: ${IMAGE_NAME}:${TAG}${NC}" +echo -e "${GREEN}Platforms: linux/amd64${NC}" + +# Verify the manifest +echo -e "${YELLOW}Verifying multi-arch manifest...${NC}" +sudo docker buildx imagetools inspect "${IMAGE_NAME}:${TAG}" \ No newline at end of file diff --git a/decoupled_wbc/docker/entrypoint/bash.sh b/decoupled_wbc/docker/entrypoint/bash.sh new file mode 100755 index 0000000..4ff46bf --- /dev/null +++ b/decoupled_wbc/docker/entrypoint/bash.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -e # Exit on error + +echo "Dependencies installed successfully. Starting interactive bash shell..." 
+exec /bin/bash diff --git a/decoupled_wbc/docker/entrypoint/deploy.sh b/decoupled_wbc/docker/entrypoint/deploy.sh new file mode 100755 index 0000000..b624c6b --- /dev/null +++ b/decoupled_wbc/docker/entrypoint/deploy.sh @@ -0,0 +1,20 @@ +#!/bin/bash +set -e # Exit on error + +# Run the deployment script +# Check for script existence before running +DEPLOY_SCRIPT="decoupled_wbc/scripts/deploy_g1.py" +if [ -f "$DEPLOY_SCRIPT" ]; then + echo "Running deployment script at $DEPLOY_SCRIPT" + echo "Using python from $(which python)" + echo "Deploy args: $@" + exec python "$DEPLOY_SCRIPT" "$@" +else + echo "ERROR: Deployment script not found at $DEPLOY_SCRIPT" + echo "Current directory structure:" + find . -type f -name "*.py" | grep -i deploy + echo "Available script options:" + find . -type f -name "*.py" | sort + echo "Starting a bash shell for troubleshooting..." + exec /bin/bash +fi diff --git a/decoupled_wbc/docker/entrypoint/install_deps.sh b/decoupled_wbc/docker/entrypoint/install_deps.sh new file mode 100755 index 0000000..fd7280d --- /dev/null +++ b/decoupled_wbc/docker/entrypoint/install_deps.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +# Source virtual environment and ROS2 +source ${HOME}/venv/bin/activate +source /opt/ros/humble/setup.bash +export ROS_LOCALHOST_ONLY=1 + +# Install external dependencies +echo "Current directory: $(pwd)" +echo "Installing dependencies..." + +# Install Unitree SDK and LeRobot +if [ -d "external_dependencies/unitree_sdk2_python" ]; then + cd external_dependencies/unitree_sdk2_python/ + uv pip install -e . --no-deps + cd ../.. 
+fi + +# Install project packages +if [ -f "decoupled_wbc/pyproject.toml" ]; then + UV_GIT_LFS=1 uv pip install -e "decoupled_wbc[full,dev]" -e "gear_sonic[sim]" +fi diff --git a/docker/image_name.txt b/decoupled_wbc/docker/image_name.txt similarity index 100% rename from docker/image_name.txt rename to decoupled_wbc/docker/image_name.txt diff --git a/docker/kill_all_containers.sh b/decoupled_wbc/docker/kill_all_containers.sh similarity index 100% rename from docker/kill_all_containers.sh rename to decoupled_wbc/docker/kill_all_containers.sh diff --git a/decoupled_wbc/docker/kill_decoupled_wbc_processors.sh b/decoupled_wbc/docker/kill_decoupled_wbc_processors.sh new file mode 100755 index 0000000..b9dd4c1 --- /dev/null +++ b/decoupled_wbc/docker/kill_decoupled_wbc_processors.sh @@ -0,0 +1,192 @@ +#!/bin/bash + +# kill_decoupled_wbc_processors.sh +# Kill decoupled_wbc processes in current container to prevent message passing conflicts + +# Note: Don't use 'set -e' as tmux/pgrep commands may return non-zero exit codes + +# Configuration +DRY_RUN=false +FORCE=false +QUIET=false +declare -A FOUND_PROCESSES + +# Default to verbose mode if no arguments +[[ $# -eq 0 ]] && { QUIET=false; DRY_RUN=false; } + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --dry-run) DRY_RUN=true ;; + --force) FORCE=true ;; + --verbose|-v) VERBOSE=true ;; + --help|-h) + echo "Usage: $0 [--dry-run] [--force] [--verbose] [--help]" + echo "Kill decoupled_wbc processes to prevent message passing conflicts" + exit 0 ;; + *) echo "Unknown option: $1"; exit 1 ;; + esac + shift +done + +# Colors (only if not quiet) +if [[ "$QUIET" != true ]]; then + RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; BLUE='\033[0;34m'; NC='\033[0m' +else + RED=''; GREEN=''; YELLOW=''; BLUE=''; NC='' +fi + +# Show processes by pattern (for preview) +show_processes_by_pattern() { + local pattern="$1" desc="$2" + local pids=$(pgrep -f "$pattern" 2>/dev/null || true) + + [[ -z "$pids" ]] && return 0 + 
+ echo -e "${YELLOW}$desc processes:${NC}" + + for pid in $pids; do + local cmd=$(ps -p $pid -o cmd= 2>/dev/null || echo "Process not found") + echo " PID $pid: $cmd" + done +} + +# Kill processes by pattern (silent killing) +kill_by_pattern() { + local pattern="$1" desc="$2" signal="${3:-TERM}" + local pids=$(pgrep -f "$pattern" 2>/dev/null || true) + + [[ -z "$pids" ]] && return 0 + + for pid in $pids; do + # Kill if not dry run + [[ "$DRY_RUN" != true ]] && kill -$signal $pid 2>/dev/null + done +} + +# Show tmux sessions (for preview) +show_tmux() { + local pattern="$1" + local sessions=$(tmux list-sessions 2>/dev/null | grep "$pattern" | cut -d: -f1 || true) + + [[ -z "$sessions" ]] && return 0 + + echo -e "${YELLOW}Tmux sessions:${NC}" + + for session in $sessions; do + echo " Session: $session" + done +} + +# Kill tmux sessions (silent killing) +kill_tmux() { + local pattern="$1" + local sessions=$(tmux list-sessions 2>/dev/null | grep "$pattern" | cut -d: -f1 || true) + + [[ -z "$sessions" ]] && return 0 + + for session in $sessions; do + [[ "$DRY_RUN" != true ]] && tmux kill-session -t "$session" 2>/dev/null + done +} + +# Show processes by port (for preview) +show_processes_by_port() { + local port="$1" desc="$2" + local pids=$(lsof -ti:$port 2>/dev/null || true) + + [[ -z "$pids" ]] && return 0 + + echo -e "${YELLOW}$desc (port $port):${NC}" + + for pid in $pids; do + local cmd=$(ps -p $pid -o cmd= 2>/dev/null || echo "Process not found") + echo " PID $pid: $cmd" + done +} + +# Kill processes by port (silent killing) +kill_by_port() { + local port="$1" desc="$2" + local pids=$(lsof -ti:$port 2>/dev/null || true) + + [[ -z "$pids" ]] && return 0 + + for pid in $pids; do + [[ "$DRY_RUN" != true ]] && kill -TERM $pid 2>/dev/null + done +} + +# Check if any processes exist +has_processes() { + # Check for processes + local has_tmux=$(tmux list-sessions 2>/dev/null | grep "g1_deployment" || true) + local has_control=$(pgrep -f "run_g1_control_loop.py" 
2>/dev/null || true) + local has_teleop=$(pgrep -f "run_teleop_policy_loop.py" 2>/dev/null || true) + local has_camera=$(pgrep -f "camera_forwarder.py" 2>/dev/null || true) + local has_rqt=$(pgrep -f "rqt.*image_view" 2>/dev/null || true) + local has_port=$(lsof -ti:5555 2>/dev/null || true) + + [[ -n "$has_tmux" || -n "$has_control" || -n "$has_teleop" || -n "$has_camera" || -n "$has_rqt" || -n "$has_port" ]] +} + +# Main execution +main() { + # Check if any processes exist first + if ! has_processes; then + # No processes to kill, exit silently + exit 0 + fi + + # Show header and processes to be killed + if [[ "$QUIET" != true ]]; then + echo -e "${BLUE}=== decoupled_wbc Process Killer ===${NC}" + [[ "$DRY_RUN" == true ]] && echo -e "${BLUE}=== DRY RUN MODE ===${NC}" + + # Show what will be killed + show_tmux "g1_deployment" + show_processes_by_pattern "run_g1_control_loop.py" "G1 control loop" + show_processes_by_pattern "run_teleop_policy_loop.py" "Teleop policy" + show_processes_by_pattern "camera_forwarder.py" "Camera forwarder" + show_processes_by_pattern "rqt.*image_view" "RQT viewer" + show_processes_by_port "5555" "Inference server" + + # Ask for confirmation + if [[ "$FORCE" != true && "$DRY_RUN" != true ]]; then + echo + echo -e "${RED}WARNING: This will terminate the above decoupled_wbc processes!${NC}" + read -p "Continue? 
[Y/n]: " -n 1 -r + echo + # Default to Y - only abort if user explicitly types 'n' or 'N' + [[ $REPLY =~ ^[Nn]$ ]] && { echo "Aborted."; exit 0; } + fi + echo + fi + + # Kill processes (silently) + kill_tmux "g1_deployment" + kill_by_pattern "run_g1_control_loop.py" "G1 control loop" + kill_by_pattern "run_teleop_policy_loop.py" "Teleop policy" + kill_by_pattern "camera_forwarder.py" "Camera forwarder" + kill_by_pattern "rqt.*image_view" "RQT viewer" + kill_by_port "5555" "Inference server" + + # Force kill remaining (SIGKILL) + [[ "$DRY_RUN" != true ]] && { + sleep 1 + kill_by_pattern "run_g1_control_loop.py" "G1 control loop" "KILL" + kill_by_pattern "run_teleop_policy_loop.py" "Teleop policy" "KILL" + kill_by_pattern "camera_forwarder.py" "Camera forwarder" "KILL" + } + + # Summary (unless quiet) + [[ "$QUIET" != true ]] && { + if [[ "$DRY_RUN" == true ]]; then + echo -e "${BLUE}=== DRY RUN COMPLETE ===${NC}" + else + echo -e "${GREEN}All decoupled_wbc processes terminated${NC}" + fi + } +} + +main "$@" diff --git a/docker/publish.sh b/decoupled_wbc/docker/publish.sh similarity index 100% rename from docker/publish.sh rename to decoupled_wbc/docker/publish.sh diff --git a/decoupled_wbc/docker/run_docker.sh b/decoupled_wbc/docker/run_docker.sh new file mode 100755 index 0000000..b53db26 --- /dev/null +++ b/decoupled_wbc/docker/run_docker.sh @@ -0,0 +1,455 @@ +#!/bin/bash + +# Docker run script for decoupled_wbc with branch-based container isolation +# +# Usage: +# ./docker/run_docker.sh [OPTIONS] (run from inside decoupled_wbc/) +# +# Options: +# --build Build Docker image +# --clean Clean containers +# --deploy Run in deploy mode +# --install Pull prebuilt Docker image +# --push Push built image to Docker Hub +# --branch Use branch-specific container names +# +# Branch-based Container Isolation (when --branch flag is used): +# - Each git branch gets its own isolated containers +# - Container names include branch identifier (e.g., decoupled_wbc-deploy-user-main) 
+# - Works with git worktrees, separate clones, or nested repositories +# - Clean and build operations only affect the current branch + +# Exit on error +set -e + +# Default values +BUILD=false +CLEAN=false +DEPLOY=false +INSTALL=false +# Flag to push the built Docker image to Docker Hub +# This should be used when someone updates the Docker image dependencies +# because this image is used for CI/CD pipelines +# When true, the image will be tagged and pushed to docker.io/nvgear/gr00t_wbc:latest +DOCKER_HUB_PUSH=false +# Flag to build the docker with root user +# This could cause some of your local files to be owned by root +# If you get error like "PermissionError: [Errno 13] Permission denied:" +# You can run `sudo chown -R $USER:$USER .` in local machine to fix it +ROOT=false +BRANCH_MODE=false +EXTRA_ARGS=() +PROJECT_NAME="decoupled_wbc" +PROJECT_SLUG=$(echo "$PROJECT_NAME" | tr '[:upper:]' '[:lower:]') +REMOTE_IMAGE="nvgear/gr00t_wbc:latest" + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --build) + BUILD=true + shift + ;; + --clean) + CLEAN=true + shift + ;; + --deploy) + DEPLOY=true + shift + ;; + --install) + INSTALL=true + shift + ;; + --push) + DOCKER_HUB_PUSH=true + shift + ;; + --root) + ROOT=true + shift + ;; + --branch) + BRANCH_MODE=true + shift + ;; + *) + # Collect all unknown arguments as extra args for the deployment script + EXTRA_ARGS+=("$1") + shift + ;; + esac +done + +if [ "$INSTALL" = true ] && [ "$BUILD" = true ]; then + echo "Cannot use --install and --build together. Choose one." 
+ exit 1 +fi + + +# Function to get branch name for container naming +function get_branch_id { + # Check if we're in a git repository + if git rev-parse --is-inside-work-tree > /dev/null 2>&1; then + # Get current branch name (returns "HEAD" in detached state) + local branch_name=$(git rev-parse --abbrev-ref HEAD) + # Replace forward slashes with dashes for valid container names + echo "${branch_name//\//-}" + else + # Default: no branch identifier (not in git repo) + echo "" + fi +} + +# Architecture detection helpers +is_arm64() { [ "$(dpkg --print-architecture)" = "arm64" ]; } +is_amd64() { [ "$(dpkg --print-architecture)" = "amd64" ]; } + +# Get current user's username and UID +if [ "$ROOT" = true ]; then + USERNAME=root + USERID=0 + DOCKER_HOME_DIR=/root + CACHE_FROM=${PROJECT_SLUG}-deploy-cache-root +else + USERNAME=$(whoami) + USERID=$(id -u) + DOCKER_HOME_DIR=/home/${USERNAME} + CACHE_FROM=${PROJECT_SLUG}-deploy-cache +fi +# Get input group ID for device access +INPUT_GID=$(getent group input | cut -d: -f3) + +# Get script directory for path calculations +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# Function to get the actual project directory (worktree-aware) +function get_project_dir { + # For worktrees, use the actual worktree root path + if git rev-parse --is-inside-work-tree > /dev/null 2>&1; then + git rev-parse --show-toplevel + else + # Fallback to script-based detection (go up two levels: docker/ -> decoupled_wbc/ -> project root) + dirname "$(dirname "$SCRIPT_DIR")" + fi +} + +# Get branch identifier +BRANCH_ID=$(get_branch_id) + +# Set project directory (needs to be after branch detection) +PROJECT_DIR="$(get_project_dir)" + +# Function to generate container name with optional branch support +function get_container_name { + local container_type="$1" + if [[ -n "$BRANCH_ID" ]] && [[ "$BRANCH_MODE" = true ]]; then + echo "${PROJECT_SLUG}-${container_type}-${USERNAME}-${BRANCH_ID}" + else + echo 
"${PROJECT_SLUG}-${container_type}-${USERNAME}" + fi +} + +# Set common variables used throughout the script +DEPLOY_CONTAINER=$(get_container_name "deploy") +BASH_CONTAINER=$(get_container_name "bash") +WORKTREE_NAME=$(basename "$PROJECT_DIR") + +# Debug output for branch detection +if [[ -n "$BRANCH_ID" ]] && [[ "$BRANCH_MODE" = true ]]; then + echo "Branch mode enabled - using branch: $BRANCH_ID" + echo "Project directory: $PROJECT_DIR" +elif [[ -n "$BRANCH_ID" ]]; then + echo "Branch mode disabled - using default containers" + echo "Project directory: $PROJECT_DIR" +else + echo "Running outside git repository" + echo "Project directory: $PROJECT_DIR" +fi + +# Get host's hostname and append -docker +HOSTNAME=$(hostname)-docker + +function clean_container { + echo "Cleaning up Docker containers..." + + # Stop containers + sudo docker stop $DEPLOY_CONTAINER 2>/dev/null || true + sudo docker stop $BASH_CONTAINER 2>/dev/null || true + # Remove containers + echo "Removing containers..." + sudo docker rm $DEPLOY_CONTAINER 2>/dev/null || true + sudo docker rm $BASH_CONTAINER 2>/dev/null || true + echo "Containers cleaned!" +} + + +# Function to install Docker Buildx if needed +function install_docker_buildx { + # Check if Docker Buildx is already installed + if sudo docker buildx version &> /dev/null; then + echo "Docker Buildx is already installed." + return 0 + fi + + echo "Installing Docker Buildx..." 
+ + # Create directories and detect architecture + mkdir -p ~/.docker/cli-plugins/ && sudo mkdir -p /root/.docker/cli-plugins/ + ARCH=$(dpkg --print-architecture) + [[ "$ARCH" == "arm64" ]] && BUILDX_ARCH="linux-arm64" || BUILDX_ARCH="linux-amd64" + + # Get version (with fallback) + BUILDX_VERSION=$(curl -s https://api.github.com/repos/docker/buildx/releases/latest | grep tag_name | cut -d '"' -f 4) + BUILDX_VERSION=${BUILDX_VERSION:-v0.13.1} + + # Download and install for both user and root + curl -L "https://github.com/docker/buildx/releases/download/${BUILDX_VERSION}/buildx-${BUILDX_VERSION}.${BUILDX_ARCH}" -o ~/.docker/cli-plugins/docker-buildx + sudo cp ~/.docker/cli-plugins/docker-buildx /root/.docker/cli-plugins/docker-buildx + chmod +x ~/.docker/cli-plugins/docker-buildx && sudo chmod +x /root/.docker/cli-plugins/docker-buildx + + # Create builder + sudo docker buildx create --use --name mybuilder || true + sudo docker buildx inspect --bootstrap + + echo "Docker Buildx installation complete!" +} + +# Function to install NVIDIA Container Toolkit if needed +function install_nvidia_toolkit { + # Check if NVIDIA Container Toolkit is already installed + if command -v nvidia-container-toolkit &> /dev/null; then + echo "NVIDIA Container Toolkit is already installed." + return 0 + fi + + echo "Installing NVIDIA Container Toolkit..." + + # Add the package repositories + distribution=$(. /etc/os-release;echo $ID$VERSION_ID) + + # Check if GPG key exists and remove it if it does + if [ -f "/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg" ]; then + echo "Removing existing NVIDIA GPG key..." 
+ sudo rm /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg + fi + + # Add new GPG key + curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg + + # Add repository + curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \ + sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ + sudo tee /etc/apt/sources.list.d/nvidia-container-runtime.list + + # Install nvidia-container-toolkit and docker if needed + sudo apt-get update + sudo apt-get install -y nvidia-container-toolkit + + # Install docker if not already installed + if ! command -v docker &> /dev/null; then + sudo apt-get install -y docker.io + fi + + # Configure Docker to use the NVIDIA runtime + sudo nvidia-ctk runtime configure --runtime=docker + + # Restart the Docker daemon + sudo systemctl restart docker + + echo "NVIDIA Container Toolkit installation complete!" +} + + +# Function to build Docker image for current branch +function build_docker_image { + echo "Building Docker image: $DEPLOY_CONTAINER" + + sudo docker buildx build \ + --build-arg USERNAME=$USERNAME \ + --build-arg USERID=$USERID \ + --build-arg HOME_DIR=$DOCKER_HOME_DIR \ + --build-arg WORKTREE_NAME=$WORKTREE_NAME \ + --cache-from $CACHE_FROM \ + -t $DEPLOY_CONTAINER \ + -f "$SCRIPT_DIR/Dockerfile.deploy" \ + --load \ + "$PROJECT_DIR" + + # Tag for persistent cache + # sudo docker tag $DEPLOY_CONTAINER $CACHE_FROM + echo "Docker image build complete!" +} + +# Build function +function build_with_cleanup { + echo "Building Docker image..." + echo "Removing existing containers and images..." + clean_container + # Tag for persistent cache before deleting the image + sudo docker tag $DEPLOY_CONTAINER $CACHE_FROM 2>/dev/null || true + sudo docker rmi $DEPLOY_CONTAINER 2>/dev/null || true + echo "Images cleaned!" 
+ + install_docker_buildx + install_nvidia_toolkit + build_docker_image +} + +function install_remote_image { + echo "Installing Docker image from remote registry: $REMOTE_IMAGE" + echo "Removing existing containers to ensure a clean install..." + clean_container + sudo docker pull "$REMOTE_IMAGE" + sudo docker tag "$REMOTE_IMAGE" "$DEPLOY_CONTAINER" + sudo docker tag "$REMOTE_IMAGE" "$CACHE_FROM" 2>/dev/null || true + echo "Docker image install complete!" +} + +# Clean up if requested +if [ "$CLEAN" = true ]; then + clean_container + exit 0 +fi + +# Build if requested +if [ "$BUILD" = true ]; then + build_with_cleanup +fi + +if [ "$INSTALL" = true ]; then + install_remote_image +fi + +if [ "$DOCKER_HUB_PUSH" = true ]; then + echo "Pushing Docker image to Docker Hub: docker.io/${REMOTE_IMAGE}" + sudo docker tag $DEPLOY_CONTAINER docker.io/${REMOTE_IMAGE} + sudo docker push docker.io/${REMOTE_IMAGE} + echo "Docker image pushed to Docker Hub!" + exit 0 +fi + +# Setup X11 display forwarding +setup_x11() { + # Set display if missing and X server available + if [ -z "$DISPLAY" ] && command -v xset >/dev/null 2>&1 && xset q >/dev/null 2>&1; then + export DISPLAY=:1 + echo "No DISPLAY set, using :1" + fi + + # Enable X11 forwarding if possible + if [ -n "$DISPLAY" ] && command -v xhost >/dev/null 2>&1 && xhost +local:docker 2>/dev/null; then + echo "X11 forwarding enabled" + return 0 + else + echo "Headless environment - X11 disabled" + export DISPLAY="" + return 1 + fi +} + +X11_ENABLED=false +setup_x11 && X11_ENABLED=true + +# Mount entire /dev directory for dynamic device access (including hidraw for joycon) +# This allows JoyCon controllers to be detected even when connected after container launch +sudo chmod g+r+w /dev/input/* + +# Detect GPU setup and set appropriate environment variables +echo "Detecting GPU setup..." 
+GPU_ENV_VARS="" + +# Check if we have both integrated and discrete GPUs (hybrid/Optimus setup) +HAS_AMD_GPU=$(lspci | grep -i "vga\|3d\|display" | grep -i amd | wc -l) +HAS_INTEL_GPU=$(lspci | grep -i "vga\|3d\|display" | grep -i intel | wc -l) +HAS_NVIDIA_GPU=$(lspci | grep -i "vga\|3d\|display" | grep -i nvidia | wc -l) + +if [[ "$HAS_INTEL_GPU" -gt 0 ]] || [[ "$HAS_AMD_GPU" -gt 0 ]] && [[ "$HAS_NVIDIA_GPU" -gt 0 ]]; then + echo "Detected hybrid GPU setup (Intel/AMD integrated + NVIDIA discrete)" + echo "Setting NVIDIA Optimus environment variables for proper rendering offload..." + GPU_ENV_VARS="-e __NV_PRIME_RENDER_OFFLOAD=1 \ + -e __VK_LAYER_NV_optimus=NVIDIA_only" +else + GPU_ENV_VARS="" +fi + +# Set GPU runtime based on architecture +if is_arm64; then + echo "Detected ARM64 architecture (Jetson Orin), using device access instead of nvidia runtime..." + GPU_RUNTIME_ARGS="--device /dev/nvidia0 --device /dev/nvidiactl --device /dev/nvidia-modeset --device /dev/nvidia-uvm --device /dev/nvidia-uvm-tools" +else + GPU_RUNTIME_ARGS="--gpus all --runtime=nvidia" +fi + +# Common Docker run parameters +DOCKER_RUN_ARGS="--hostname $HOSTNAME \ + --user $USERNAME \ + --group-add $INPUT_GID \ + $GPU_RUNTIME_ARGS \ + --ipc=host \ + --network=host \ + --privileged \ + --device=/dev \ + $GPU_ENV_VARS \ + -p 5678:5678 \ + -e DISPLAY=$DISPLAY \ + -e NVIDIA_VISIBLE_DEVICES=all \ + -e NVIDIA_DRIVER_CAPABILITIES=graphics,compute,utility \ + -e __GLX_VENDOR_LIBRARY_NAME=nvidia \ + -e USERNAME=$USERNAME \ + -e DECOUPLED_WBC_DIR="$DOCKER_HOME_DIR/Projects/$WORKTREE_NAME" \ + -e PYTHONPATH="$DOCKER_HOME_DIR/Projects/$WORKTREE_NAME" \ + -v /dev/bus/usb:/dev/bus/usb \ + -v /tmp/.X11-unix:/tmp/.X11-unix \ + -v $HOME/.ssh:$DOCKER_HOME_DIR/.ssh \ + -v $HOME/.gear:$DOCKER_HOME_DIR/.gear \ + -v $HOME/.Xauthority:$DOCKER_HOME_DIR/.Xauthority \ + -v $PROJECT_DIR:$DOCKER_HOME_DIR/Projects/$(basename "$PROJECT_DIR") + --device /dev/snd \ + --group-add audio \ + -e 
PULSE_SERVER=unix:/run/user/$(id -u)/pulse/native \ + -v /run/user/$(id -u)/pulse/native:/run/user/$(id -u)/pulse/native \ + -v $HOME/.config/pulse/cookie:/home/$USERNAME/.config/pulse/cookie" + +# Check if RL mode first, then handle container logic +if [ "$DEPLOY" = true ]; then + # Deploy mode - use decoupled_wbc-deploy-${USERNAME} container + + # Always clean up old processes and create a new container + # Kill all decoupled_wbc processes across containers to prevent message passing conflicts + "$SCRIPT_DIR/kill_decoupled_wbc_processors.sh" + echo "Creating new deploy container..." + + # Clean up old processes and create a fresh deploy container + # Remove existing deploy container if it exists + if sudo docker ps -a --format '{{.Names}}' | grep -q "^$DEPLOY_CONTAINER$"; then + echo "Removing existing deploy container..." + sudo docker rm -f $DEPLOY_CONTAINER + fi + sudo docker run -it --rm $DOCKER_RUN_ARGS \ + -w $DOCKER_HOME_DIR/Projects/$WORKTREE_NAME \ + --name $DEPLOY_CONTAINER \ + $DEPLOY_CONTAINER \ + /bin/bash -ic 'exec "$0" "$@"' \ + "${DOCKER_HOME_DIR}/Projects/${WORKTREE_NAME}/decoupled_wbc/docker/entrypoint/deploy.sh" \ + "${EXTRA_ARGS[@]}" +else + # Bash mode - use decoupled_wbc-bash-${USERNAME} container + if sudo docker ps -a --format '{{.Names}}' | grep -q "^$BASH_CONTAINER$"; then + echo "Bash container exists, starting it..." + sudo docker start $BASH_CONTAINER > /dev/null + sudo docker exec -it $BASH_CONTAINER /bin/bash + else + echo "Creating new bash container with auto-install decoupled_wbc..." 
+ sudo docker run -it $DOCKER_RUN_ARGS \ + -w $DOCKER_HOME_DIR/Projects/$WORKTREE_NAME \ + --name $BASH_CONTAINER \ + $DEPLOY_CONTAINER \ + /bin/bash -ic 'exec "$0"' \ + "${DOCKER_HOME_DIR}/Projects/${WORKTREE_NAME}/decoupled_wbc/docker/entrypoint/bash.sh" + fi +fi + +# Cleanup X11 permissions +$X11_ENABLED && xhost -local:docker 2>/dev/null diff --git a/decoupled_wbc/pyproject.toml b/decoupled_wbc/pyproject.toml new file mode 100644 index 0000000..ff208fd --- /dev/null +++ b/decoupled_wbc/pyproject.toml @@ -0,0 +1,93 @@ +[build-system] +requires = ["setuptools>=67", "wheel", "pip"] +build-backend = "setuptools.build_meta" + +[project] +name = "decoupled_wbc" +dynamic = ["version"] +readme = "../README.md" +classifiers = [ + "Intended Audience :: Science/Research", + "Development Status :: 3 - Alpha", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +authors = [ + {name = "NVIDIA Gear Lab"} +] +requires-python = "~=3.10.0" +dependencies = [ + "numpy==1.26.4", + "scipy==1.15.3", + "torch", +] +license = {file = "../LICENSE"} + +[project.optional-dependencies] +# Full: all dependencies for the complete decoupled_wbc project +# Usage: pip install -e "decoupled_wbc[full]" +full = [ + "av>=14.2", + "pyttsx3==2.90", + "matplotlib", + "hydra-core", + "ray[default]", + "click", + "gymnasium", + "mujoco", + "termcolor", + "flask", + "python-socketio>=5.13.0", + "flask_socketio", + "loguru", + "meshcat", + "meshcat-shapes", + "onnxruntime", + "rerun-sdk==0.21.0", + "pygame", + "sshkeyboard", + "msgpack", + "msgpack-numpy", + "pyzmq", + "PyQt6; platform_machine != 'aarch64'", + "pin", + "pin-pink", + "pyrealsense2; sys_platform != 'darwin'", + "pyrealsense2-macosx; sys_platform == 'darwin'", + "qpsolvers[osqp,quadprog]", + "tyro", + "cv-bridge", + "lark", + "lerobot @ git+https://github.com/huggingface/lerobot.git@a445d9c9da6bea99a8972daa4fe1fdd053d711d2", + 
"datasets==3.6.0", + "pandas", + "evdev; sys_platform == 'linux'", + "pyyaml", +] +dev = [ + "pytest==7.4.0", + "build", + "setuptools", + "wheel", + "ruff", + "black", + "ipdb", +] + +[project.scripts] +decoupled_wbc = "decoupled_wbc.control.teleop.gui.cli:cli" + +[tool.setuptools.packages.find] +where = [".."] +include = ["decoupled_wbc*"] + +[tool.setuptools] +include-package-data = true + +[tool.setuptools.package-data] +decoupled_wbc = ["py.typed", "**/*.json", "**/*.yaml"] + +[tool.setuptools.dynamic] +version = {attr = "decoupled_wbc.version.VERSION"} + diff --git a/decoupled_wbc/scripts/deploy_g1.py b/decoupled_wbc/scripts/deploy_g1.py new file mode 100644 index 0000000..b2fa6be --- /dev/null +++ b/decoupled_wbc/scripts/deploy_g1.py @@ -0,0 +1,472 @@ +from pathlib import Path +import signal +import subprocess +import sys +import time + +import tyro + +from decoupled_wbc.control.main.teleop.configs.configs import DeploymentConfig +from decoupled_wbc.control.utils.run_real_checklist import show_deployment_checklist + + +class G1Deployment: + """ + Unified deployment manager for G1 robot with one-click operation. + Handles camera setup, control loop, teleoperation, and data collection. + Uses tmux for process management and I/O handling. 
+ """ + + def __init__(self, config: DeploymentConfig): + self.config = config + + # Process directories + self.project_root = Path(__file__).resolve().parent.parent + + # Tmux session name + self.session_name = "g1_deployment" + + # Create tmux session if it doesn't exist + self._create_tmux_session() + + def _create_tmux_session(self): + """Create a new tmux session if it doesn't exist""" + # Check if session exists + result = subprocess.run( + ["tmux", "has-session", "-t", self.session_name], capture_output=True, text=True + ) + + if result.returncode != 0: + # Create new session + subprocess.run(["tmux", "new-session", "-d", "-s", self.session_name]) + print(f"Created new tmux session: {self.session_name}") + + # Set up the default window for control, data collection, and teleop + # First rename the default window (which is 0) to our desired name + subprocess.run( + ["tmux", "rename-window", "-t", f"{self.session_name}:0", "control_data_teleop"] + ) + # Split the window horizontally (left and right) + subprocess.run(["tmux", "split-window", "-t", f"{self.session_name}:0", "-h"]) + # Split the right pane vertically (top and bottom) + subprocess.run(["tmux", "split-window", "-t", f"{self.session_name}:0.1", "-v"]) + # Select the left pane (control) + subprocess.run(["tmux", "select-pane", "-t", f"{self.session_name}:0.0"]) + + def _run_in_tmux(self, name, cmd, wait_time=2, pane_index=None): + """Run a command in a new tmux window or pane""" + if pane_index is not None: + # Run in existing window's pane + target = f"{self.session_name}:0.{pane_index}" + else: + # Create new window + subprocess.run(["tmux", "new-window", "-t", self.session_name, "-n", name]) + target = f"{self.session_name}:{name}" + + # Set up trap for Ctrl+\ in the window + trap_cmd = f"trap 'tmux kill-session -t {self.session_name}' QUIT" + + # Set environment variable for the tmux session name + env_cmd = f"export DECOUPLED_WBC_TMUX_SESSION={self.session_name}" + + # Construct the command with 
proper escaping and trap + cmd_str = " ".join(str(x) for x in cmd) + full_cmd = f"{trap_cmd}; {env_cmd}; {cmd_str}" + + # Send command to tmux window/pane + subprocess.run(["tmux", "send-keys", "-t", target, full_cmd, "C-m"]) + + # Wait for process to start + time.sleep(wait_time) + + # Check if process is still running + result = subprocess.run( + ["tmux", "list-panes", "-t", target, "-F", "#{pane_dead}"], + capture_output=True, + text=True, + ) + + if result.stdout.strip() == "1": + print(f"ERROR: {name} failed to start!") + return False + + return True + + def start_camera_sensor(self): + """Start the camera sensor in local mode if we are using replay video""" + if self.config.egoview_replay_dummy is None and self.config.head_replay_dummy is None: + return + + print("Starting camera sensor in local mode...") + cmd = [ + sys.executable, + str(self.project_root / "control/sensor/composed_camera.py"), + "--egoview_camera", + self.config.egoview_replay_dummy, + "--head_camera", + self.config.head_replay_dummy, + "--port", + str(self.config.camera_port), + "--host", + "localhost", + ] + + if not self._run_in_tmux("camera_sensor", cmd): + print("ERROR: Camera sensor failed to start!") + print("Continuing without camera sensor...") + else: + print("Camera sensor started successfully.") + + def start_camera_viewer(self): + """Start the ROS rqt camera viewer""" + if not self.config.view_camera: + return + + print("Starting camera viewer...") + # Use rqt directly instead of ros2 run + cmd = [ + sys.executable, + str(self.project_root / "control/main/teleop/run_camera_viewer.py"), + "--camera_host", + self.config.camera_host, + "--camera_port", + str(self.config.camera_port), + "--fps", + str(self.config.fps), + ] + + if not self._run_in_tmux("camera_viewer", cmd): + print("ERROR: Camera viewer failed to start!") + print("Continuing without camera viewer...") + else: + print("Camera viewer started successfully.") + + def start_sim_loop(self): + """Start the simulation loop 
in a separate process""" + print("Starting simulation loop...") + cmd = [ + sys.executable, + str(self.project_root / "control/main/teleop/run_sim_loop.py"), + "--wbc_version", + self.config.wbc_version, + "--interface", + self.config.interface, + "--simulator", + self.config.simulator, + "--sim_frequency", + str(self.config.sim_frequency), + "--env_name", + self.config.env_name, + "--camera_port", + str(self.config.camera_port), + ] + + # Handle boolean flags + if self.config.enable_waist: + cmd.append("--enable_waist") + else: + cmd.append("--no-enable_waist") + + if self.config.with_hands: + cmd.append("--with_hands") + else: + cmd.append("--no-with_hands") + + if self.config.image_publish: + cmd.append("--enable_image_publish") + cmd.append("--enable_offscreen") + else: + cmd.append("--no-enable_image_publish") + + if self.config.enable_onscreen: + cmd.append("--enable_onscreen") + else: + cmd.append("--no-enable_onscreen") + + if not self._run_in_tmux("sim_loop", cmd, wait_time=5): + print("ERROR: Simulation loop failed to start!") + self.cleanup() + sys.exit(1) + + print("Simulation loop started successfully. 
Waiting for warmup for 10 seconds...") + time.sleep(10) # Wait for sim loop to warm up + + def start_control_loop(self): + """Start the G1 control loop""" + print("Starting G1 control loop...") + cmd = [ + sys.executable, + str(self.project_root / "control/main/teleop/run_g1_control_loop.py"), + "--wbc_version", + self.config.wbc_version, + "--wbc_model_path", + self.config.wbc_model_path, + "--wbc_policy_class", + self.config.wbc_policy_class, + "--interface", + self.config.interface, + "--simulator", + "None" if self.config.sim_in_single_process else self.config.simulator, + "--control_frequency", + str(self.config.control_frequency), + ] + + # Handle boolean flag using presence/absence pattern + if self.config.enable_waist: + cmd.append("--enable_waist") + else: + cmd.append("--no-enable_waist") + + if self.config.with_hands: + cmd.append("--with_hands") + else: + cmd.append("--no-with_hands") + + if self.config.high_elbow_pose: + cmd.append("--high_elbow_pose") + else: + cmd.append("--no-high_elbow_pose") + + # Gravity compensation configuration + # Note: This is where gravity compensation is actually applied since the control loop + # contains the G1Body that interfaces directly with the robot motors + if self.config.enable_gravity_compensation: + cmd.append("--enable_gravity_compensation") + # Add joint groups if specified + if self.config.gravity_compensation_joints: + cmd.extend( + ["--gravity_compensation_joints"] + self.config.gravity_compensation_joints + ) + else: + cmd.append("--no-enable_gravity_compensation") + + if not self._run_in_tmux("control", cmd, wait_time=3, pane_index=0): + print("ERROR: Control loop failed to start!") + self.cleanup() + sys.exit(1) + + print("Control loop started successfully.") + print("Controls: 'i' for initial pose, ']' to activate locomotion") + + def start_policy(self): + """Start either teleop or inference policy based on configuration""" + if not self.config.enable_upper_body_operation: + print("Upper body operation 
disabled in config.") + return + + self.start_teleop() + + def start_teleop(self): + """Start the teleoperation policy""" + print("Starting teleoperation policy...") + cmd = [ + sys.executable, + str(self.project_root / "control/main/teleop/run_teleop_policy_loop.py"), + "--body_control_device", + self.config.body_control_device, + "--hand_control_device", + self.config.hand_control_device, + "--body_streamer_ip", + self.config.body_streamer_ip, + "--body_streamer_keyword", + self.config.body_streamer_keyword, + ] + + # Handle boolean flags using tyro syntax + if self.config.enable_waist: + cmd.append("--enable_waist") + else: + cmd.append("--no-enable_waist") + + if self.config.high_elbow_pose: + cmd.append("--high_elbow_pose") + else: + cmd.append("--no-high_elbow_pose") + + if self.config.enable_visualization: + cmd.append("--enable_visualization") + else: + cmd.append("--no-enable_visualization") + + if self.config.enable_real_device: + cmd.append("--enable_real_device") + else: + cmd.append("--no-enable_real_device") + + if not self._run_in_tmux("teleop", cmd, pane_index=2): + print("ERROR: Teleoperation policy failed to start!") + print("Continuing without teleoperation...") + else: + print("Teleoperation policy started successfully.") + print("Press 'l' in the control loop terminal to start teleoperation.") + + def start_data_collection(self): + """Start the data collection process""" + if not self.config.data_collection: + print("Data collection disabled in config.") + return + + print("Starting data collection...") + cmd = [ + sys.executable, + str(self.project_root / "control/main/teleop/run_g1_data_exporter.py"), + "--data_collection_frequency", + str(self.config.data_collection_frequency), + "--root_output_dir", + self.config.root_output_dir, + "--lower_body_policy", + self.config.wbc_version, + "--wbc_model_path", + self.config.wbc_model_path, + "--camera_host", + self.config.camera_host, + "--camera_port", + str(self.config.camera_port), + ] + + if 
not self._run_in_tmux("data", cmd, pane_index=1): + print("ERROR: Data collection failed to start!") + print("Continuing without data collection...") + else: + print("Data collection started successfully.") + print("Press 'c' in the control loop terminal to start/stop recording data.") + + def start_webcam_recording(self): + """Start webcam recording for real robot deployment monitoring""" + if not self.config.enable_webcam_recording or self.config.env_type != "real": + return + + print("Starting webcam recording for deployment monitoring...") + cmd = [ + sys.executable, + str(self.project_root / "scripts/run_webcam_recorder.py"), + "--output_dir", + self.config.webcam_output_dir, + ] + + if not self._run_in_tmux("webcam", cmd): + print("ERROR: Webcam recording failed to start!") + print("Continuing without webcam recording...") + else: + print("Webcam recording started successfully.") + print("External camera recording deployment activities to logs_experiment/") + + def deploy(self): + """ + Run the complete deployment process + """ + print("Starting G1 deployment with config:") + print(f" Robot IP: {self.config.robot_ip}") + print(f" WBC Version: {self.config.wbc_version}") + print(f" Interface: {self.config.interface}") + print(f" Policy Mode: {self.config.upper_body_operation_mode}") + print(f" With Hands: {self.config.with_hands}") + print(f" View Camera: {self.config.view_camera}") + print(f" Enable Waist: {self.config.enable_waist}") + print(f" High Elbow Pose: {self.config.high_elbow_pose}") + print(f" Gravity Compensation: {self.config.enable_gravity_compensation}") + if self.config.enable_gravity_compensation: + print(f" Gravity Comp Joints: {self.config.gravity_compensation_joints}") + print( + f" Webcam Recording: {self.config.enable_webcam_recording and self.config.env_type == 'real'}" + ) + print(f" Sim in Single Process: {self.config.sim_in_single_process}") + if self.config.sim_in_single_process: + print(f" Image Publish: 
{self.config.image_publish}") + + # Check if this is a real robot deployment and run safety checklist + if self.config.env_type == "real": + if not show_deployment_checklist(): + sys.exit(1) + + # Register signal handler for clean shutdown + signal.signal(signal.SIGINT, self.signal_handler) + + # Start components in sequence + # Start sim loop first if sim_in_single_process is enabled + if self.config.sim_in_single_process: + self.start_sim_loop() + + self.start_control_loop() + self.start_camera_viewer() + self.start_policy() # This will start either teleop or inference policy + self.start_data_collection() + self.start_webcam_recording() # Only runs for real robot deployment + + print("\n--- G1 DEPLOYMENT COMPLETE ---") + print("All systems running in tmux session:", self.session_name) + print("Press Ctrl+b then d to detach from the session") + print("Press Ctrl+\\ in any window to shutdown all components.") + + try: + # Automatically attach to the tmux session and switch to control window + subprocess.run( + [ + "tmux", + "attach", + "-t", + self.session_name, + ";", + "select-window", + "-t", + "control_data_teleop", + ] + ) + except KeyboardInterrupt: + print("\nShutdown requested...") + self.cleanup() + sys.exit(0) + + # Keep main thread alive to handle signals + try: + while True: + # Check if tmux session still exists + result = subprocess.run( + ["tmux", "has-session", "-t", self.session_name], capture_output=True, text=True + ) + + if result.returncode != 0: + print("Tmux session terminated. 
Exiting.") + break + + time.sleep(1) + except KeyboardInterrupt: + print("\nShutdown requested...") + finally: + self.cleanup() + + def cleanup(self): + """Clean up tmux session""" + print("Cleaning up tmux session...") + try: + # Kill the tmux session + subprocess.run(["tmux", "kill-session", "-t", self.session_name], timeout=5) + print("Tmux session terminated successfully.") + except subprocess.TimeoutExpired: + print("Warning: Tmux session termination timed out, forcing kill...") + subprocess.run(["tmux", "kill-session", "-t", self.session_name, "-9"]) + except Exception as e: + print(f"Warning: Error during cleanup: {e}") + print("Cleanup complete.") + + def signal_handler(self, sig, frame): + """Handle SIGINT (Ctrl+C) gracefully""" + print("\nShutdown signal received...") + self.cleanup() + sys.exit(0) + + +def main(): + """Main entry point with automatic CLI generation from G1Config dataclass""" + # This single line automatically generates a complete CLI from the dataclass! + config = tyro.cli(DeploymentConfig) + + # Run deployment with the configured settings + deployment = G1Deployment(config) + deployment.deploy() + + +if __name__ == "__main__": + # Edited outside docker + + main() diff --git a/scripts/leap_tracking_example.py b/decoupled_wbc/scripts/leap_tracking_example.py similarity index 100% rename from scripts/leap_tracking_example.py rename to decoupled_wbc/scripts/leap_tracking_example.py diff --git a/scripts/run_webcam_recorder.py b/decoupled_wbc/scripts/run_webcam_recorder.py similarity index 100% rename from scripts/run_webcam_recorder.py rename to decoupled_wbc/scripts/run_webcam_recorder.py diff --git a/gr00t_wbc/sim2mujoco/.gitattributes b/decoupled_wbc/sim2mujoco/.gitattributes similarity index 100% rename from gr00t_wbc/sim2mujoco/.gitattributes rename to decoupled_wbc/sim2mujoco/.gitattributes diff --git a/gr00t_wbc/sim2mujoco/.gitignore b/decoupled_wbc/sim2mujoco/.gitignore similarity index 100% rename from 
gr00t_wbc/sim2mujoco/.gitignore rename to decoupled_wbc/sim2mujoco/.gitignore diff --git a/gr00t_wbc/sim2mujoco/README.md b/decoupled_wbc/sim2mujoco/README.md similarity index 100% rename from gr00t_wbc/sim2mujoco/README.md rename to decoupled_wbc/sim2mujoco/README.md diff --git a/gr00t_wbc/sim2mujoco/requirements.txt b/decoupled_wbc/sim2mujoco/requirements.txt similarity index 100% rename from gr00t_wbc/sim2mujoco/requirements.txt rename to decoupled_wbc/sim2mujoco/requirements.txt diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/README.md b/decoupled_wbc/sim2mujoco/resources/robots/g1/README.md similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/README.md rename to decoupled_wbc/sim2mujoco/resources/robots/g1/README.md diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/g1.urdf b/decoupled_wbc/sim2mujoco/resources/robots/g1/g1.urdf similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/g1.urdf rename to decoupled_wbc/sim2mujoco/resources/robots/g1/g1.urdf diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/g1.xml b/decoupled_wbc/sim2mujoco/resources/robots/g1/g1.xml similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/g1.xml rename to decoupled_wbc/sim2mujoco/resources/robots/g1/g1.xml diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/g1.yaml b/decoupled_wbc/sim2mujoco/resources/robots/g1/g1.yaml similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/g1.yaml rename to decoupled_wbc/sim2mujoco/resources/robots/g1/g1.yaml diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/g1_gear_wbc.xml b/decoupled_wbc/sim2mujoco/resources/robots/g1/g1_gear_wbc.xml similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/g1_gear_wbc.xml rename to decoupled_wbc/sim2mujoco/resources/robots/g1/g1_gear_wbc.xml diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/g1_gear_wbc.yaml b/decoupled_wbc/sim2mujoco/resources/robots/g1/g1_gear_wbc.yaml similarity index 100% 
rename from gr00t_wbc/sim2mujoco/resources/robots/g1/g1_gear_wbc.yaml rename to decoupled_wbc/sim2mujoco/resources/robots/g1/g1_gear_wbc.yaml diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/head_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/head_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/head_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/head_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_ankle_pitch_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_ankle_pitch_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_ankle_pitch_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_ankle_pitch_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_ankle_roll_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_ankle_roll_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_ankle_roll_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_ankle_roll_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_elbow_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_elbow_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_elbow_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_elbow_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_index_0_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_index_0_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_index_0_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_index_0_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_index_1_link.STL 
b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_index_1_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_index_1_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_index_1_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_middle_0_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_middle_0_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_middle_0_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_middle_0_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_middle_1_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_middle_1_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_middle_1_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_middle_1_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_palm_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_palm_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_palm_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_palm_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_0_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_0_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_0_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_0_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_1_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_1_link.STL similarity index 100% rename from 
gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_1_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_1_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_2_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_2_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_2_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hand_thumb_2_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_pitch_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_pitch_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_pitch_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_pitch_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_roll_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_roll_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_roll_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_roll_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_yaw_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_yaw_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_yaw_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_hip_yaw_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_knee_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_knee_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_knee_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_knee_link.STL diff --git 
a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_rubber_hand.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_rubber_hand.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_rubber_hand.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_rubber_hand.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_pitch_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_pitch_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_pitch_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_pitch_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_roll_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_roll_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_roll_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_roll_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_yaw_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_yaw_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_yaw_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_shoulder_yaw_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_pitch_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_pitch_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_pitch_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_pitch_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_roll_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_roll_link.STL similarity index 100% 
rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_roll_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_roll_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_roll_rubber_hand.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_roll_rubber_hand.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_roll_rubber_hand.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_roll_rubber_hand.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_yaw_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_yaw_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_yaw_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/left_wrist_yaw_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/logo_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/logo_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/logo_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/logo_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/pelvis.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/pelvis.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/pelvis.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/pelvis.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/pelvis_contour_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/pelvis_contour_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/pelvis_contour_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/pelvis_contour_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_ankle_pitch_link.STL 
b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_ankle_pitch_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_ankle_pitch_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_ankle_pitch_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_ankle_roll_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_ankle_roll_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_ankle_roll_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_ankle_roll_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_elbow_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_elbow_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_elbow_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_elbow_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_index_0_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_index_0_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_index_0_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_index_0_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_index_1_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_index_1_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_index_1_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_index_1_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_middle_0_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_middle_0_link.STL similarity index 100% rename from 
gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_middle_0_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_middle_0_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_middle_1_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_middle_1_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_middle_1_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_middle_1_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_palm_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_palm_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_palm_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_palm_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_0_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_0_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_0_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_0_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_1_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_1_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_1_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_1_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_2_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_2_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_2_link.STL rename to 
decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hand_thumb_2_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_pitch_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_pitch_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_pitch_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_pitch_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_roll_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_roll_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_roll_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_roll_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_yaw_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_yaw_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_yaw_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_hip_yaw_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_knee_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_knee_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_knee_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_knee_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_rubber_hand.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_rubber_hand.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_rubber_hand.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_rubber_hand.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_pitch_link.STL 
b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_pitch_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_pitch_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_pitch_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_roll_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_roll_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_roll_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_roll_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_yaw_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_yaw_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_yaw_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_shoulder_yaw_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_pitch_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_pitch_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_pitch_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_pitch_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_roll_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_roll_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_roll_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_roll_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_roll_rubber_hand.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_roll_rubber_hand.STL similarity index 100% rename from 
gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_roll_rubber_hand.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_roll_rubber_hand.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_yaw_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_yaw_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_yaw_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/right_wrist_yaw_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_L_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_L_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_L_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_L_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_L_rod_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_L_rod_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_L_rod_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_L_rod_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_R_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_R_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_R_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_R_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_R_rod_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_R_rod_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_R_rod_link.STL rename to 
decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_constraint_R_rod_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link_23dof_rev_1_0.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link_23dof_rev_1_0.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link_23dof_rev_1_0.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link_23dof_rev_1_0.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link_rev_1_0.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link_rev_1_0.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link_rev_1_0.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/torso_link_rev_1_0.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_constraint_L.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_constraint_L.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_constraint_L.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_constraint_L.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_constraint_R.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_constraint_R.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_constraint_R.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_constraint_R.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_roll_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_roll_link.STL similarity 
index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_roll_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_roll_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_roll_link_rev_1_0.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_roll_link_rev_1_0.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_roll_link_rev_1_0.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_roll_link_rev_1_0.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_support_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_support_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_support_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_support_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_yaw_link.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_yaw_link.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_yaw_link.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_yaw_link.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_yaw_link_rev_1_0.STL b/decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_yaw_link_rev_1_0.STL similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/meshes/waist_yaw_link_rev_1_0.STL rename to decoupled_wbc/sim2mujoco/resources/robots/g1/meshes/waist_yaw_link_rev_1_0.STL diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Balance.onnx b/decoupled_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Balance.onnx similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Balance.onnx rename to decoupled_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Balance.onnx 
diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Walk.onnx b/decoupled_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Walk.onnx similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Walk.onnx rename to decoupled_wbc/sim2mujoco/resources/robots/g1/policy/GR00T-WholeBodyControl-Walk.onnx diff --git a/gr00t_wbc/sim2mujoco/resources/robots/g1/policy/NVIDIA Open Model License b/decoupled_wbc/sim2mujoco/resources/robots/g1/policy/NVIDIA Open Model License similarity index 100% rename from gr00t_wbc/sim2mujoco/resources/robots/g1/policy/NVIDIA Open Model License rename to decoupled_wbc/sim2mujoco/resources/robots/g1/policy/NVIDIA Open Model License diff --git a/gr00t_wbc/sim2mujoco/scripts/run_mujoco_gear_wbc.py b/decoupled_wbc/sim2mujoco/scripts/run_mujoco_gear_wbc.py similarity index 100% rename from gr00t_wbc/sim2mujoco/scripts/run_mujoco_gear_wbc.py rename to decoupled_wbc/sim2mujoco/scripts/run_mujoco_gear_wbc.py diff --git a/gr00t_wbc/sim2mujoco/scripts/run_mujoco_gear_wbc_gait.py b/decoupled_wbc/sim2mujoco/scripts/run_mujoco_gear_wbc_gait.py similarity index 100% rename from gr00t_wbc/sim2mujoco/scripts/run_mujoco_gear_wbc_gait.py rename to decoupled_wbc/sim2mujoco/scripts/run_mujoco_gear_wbc_gait.py diff --git a/tests/conftest.py b/decoupled_wbc/tests/conftest.py similarity index 100% rename from tests/conftest.py rename to decoupled_wbc/tests/conftest.py diff --git a/tests/control/__init__.py b/decoupled_wbc/tests/control/__init__.py similarity index 100% rename from tests/control/__init__.py rename to decoupled_wbc/tests/control/__init__.py diff --git a/tests/control/main/__init__.py b/decoupled_wbc/tests/control/main/__init__.py similarity index 100% rename from tests/control/main/__init__.py rename to decoupled_wbc/tests/control/main/__init__.py diff --git a/tests/control/main/teleop/__init__.py b/decoupled_wbc/tests/control/main/teleop/__init__.py 
similarity index 100% rename from tests/control/main/teleop/__init__.py rename to decoupled_wbc/tests/control/main/teleop/__init__.py diff --git a/decoupled_wbc/tests/control/main/teleop/test_g1_control_loop.py b/decoupled_wbc/tests/control/main/teleop/test_g1_control_loop.py new file mode 100644 index 0000000..15ab9c4 --- /dev/null +++ b/decoupled_wbc/tests/control/main/teleop/test_g1_control_loop.py @@ -0,0 +1,469 @@ +import argparse +import os +from pathlib import Path +import signal +import subprocess +import threading +import time + +import numpy as np +import pytest +import rclpy +from scipy.spatial.transform import Rotation as R +from std_msgs.msg import String as RosStringMsg + +from decoupled_wbc.control.main.constants import ( + CONTROL_GOAL_TOPIC, + KEYBOARD_INPUT_TOPIC, + STATE_TOPIC_NAME, +) +from decoupled_wbc.control.utils.ros_utils import ROSMsgPublisher, ROSMsgSubscriber +from decoupled_wbc.control.utils.term_color_constants import GREEN_BOLD, RESET, YELLOW_BOLD +from decoupled_wbc.data.viz.rerun_viz import RerunViz + + +class KeyboardPublisher: + def __init__(self, topic_name: str = KEYBOARD_INPUT_TOPIC): + assert rclpy.ok(), "Expected ROS2 to be initialized in this process..." 
+ executor = rclpy.get_global_executor() + self.node = executor.get_nodes()[0] + self.publisher = self.node.create_publisher(RosStringMsg, topic_name, 1) + + def publish(self, key: str): + msg = RosStringMsg() + msg.data = key + self.publisher.publish(msg) + + +def is_robot_fallen_from_quat(mujoco_quat): + # Convert MuJoCo [w, x, y, z] → SciPy [x, y, z, w] + w, x, y, z = mujoco_quat + scipy_quat = [x, y, z, w] + + r = R.from_quat(scipy_quat) + roll, pitch, _ = r.as_euler("xyz", degrees=False) + + MAX_ROLL_PITCH = np.radians(60) + print(f"[Fall Check] roll={roll:.3f} rad, pitch={pitch:.3f} rad") + return abs(roll) > MAX_ROLL_PITCH or abs(pitch) > MAX_ROLL_PITCH + + +class LocomotionRunner: + def __init__(self, test_mode: str = "squat"): + self.test_mode = test_mode + if not rclpy.ok(): + rclpy.init(args=None) + self.node = rclpy.create_node(f"EvalDriver_{test_mode}_{int(time.time())}") + + # gracefully shutdown the spin thread when the test is done + self._stop_event = threading.Event() + + self.spin_thread = threading.Thread(target=self._spin_loop, daemon=False) + self.spin_thread.start() + + self.keyboard_event_publisher = KeyboardPublisher(KEYBOARD_INPUT_TOPIC) + self.control_publisher = ROSMsgPublisher(CONTROL_GOAL_TOPIC) + self.state_subscriber = ROSMsgSubscriber(STATE_TOPIC_NAME) + print(f"{test_mode} test initialized...") + + def _spin_loop(self): + try: + while rclpy.ok() and not self._stop_event.is_set(): + rclpy.spin_once(self.node) + except rclpy.executors.ExternalShutdownException: + print("[INFO] Spin thread exiting due to shutdown.") + finally: + print("spin loop stopped...") + + def warm_up(self): + """Stabilize and release the robot.""" + print("waiting for 2 seconds...") + time.sleep(2) + print(f"running {self.test_mode} test...") + self.activate() + print("activated...") + time.sleep(1) + self.release() + print("released...") + time.sleep(5) + + def _run_walk_test(self): + self.walk_forward() # speed up to 0.2 m/s + time.sleep(1) + 
self.walk_forward() # speed up to 0.4 m/s + + rate = self.node.create_rate(0.5) + start_time = time.time() + while rclpy.ok() and (time.time() - start_time) < 10.0: + obs = self.state_subscriber.get_msg() + + if is_robot_fallen_from_quat(obs["torso_quat"]): + print("robot fallen...") + return 0 + elif self._check_success_condition(obs): + print(f"robot reaching target ({self.test_mode})...") + return 1, {} + else: + rate.sleep() + + print("test timed out after 10 seconds...") + return 0, {} + + def _run_squat_test(self): + rate = self.node.create_rate(0.5) + start_time = time.time() + while rclpy.ok() and (time.time() - start_time) < 10.0: + obs = self.state_subscriber.get_msg() + + if is_robot_fallen_from_quat(obs["torso_quat"]): + print("robot fallen...") + return 0, {} + elif self._check_success_condition(obs): + print(f"robot reaching target ({self.test_mode})...") + return 1, {} + else: + self.go_down() + rate.sleep() + + print("test timed out after 10 seconds...") + return 0, {} + + def cmd_to_velocity(self, cmd_list): + cmd_to_velocity = { + "w": np.array([0.2, 0.0, 0.0]), + "s": np.array([-0.2, 0.0, 0.0]), + "q": np.array([0.0, 0.2, 0.0]), + "e": np.array([0.0, -0.2, 0.0]), + "z": np.array([0.0, 0.0, 0.0]), + } + + accumulated_velocity = np.array([0.0, 0.0, 0.0]) + velocity_list = [] + for cmd in cmd_list: + if cmd == "z": + accumulated_velocity = [0.0, 0.0, 0.0] + elif cmd in ["CHECK", "SKIP"]: + accumulated_velocity = velocity_list[-1] + else: + accumulated_velocity += cmd_to_velocity[cmd] + velocity_list.append(accumulated_velocity.copy()) + + return velocity_list + + def _run_stop_test(self): + base_vel_thres = 0.25 + + cmd_list = ( + ["w", "w", "w", "w", "s", "s", "s", "z", "SKIP", "CHECK"] + + ["s", "s", "q", "w", "w", "w", "e", "s", "s", "z", "SKIP", "CHECK"] + + ["q", "q", "w", "q", "e", "s", "s", "e", "w", "z", "SKIP", "CHECK"] + + ["w", "w", "w", "w", "w", "s", "s", "s", "s", "z", "SKIP", "CHECK"] + ) + + success_flag = 1 + + statistics = { + 
"floating_base_pose": {"state": []}, + "floating_base_vel": {"state": [], "cmd": []}, + "timestamp": [], + } + for cmd in cmd_list: + self.keyboard_event_publisher.publish(cmd) + time.sleep(0.5) + obs = self.state_subscriber.get_msg() + statistics["floating_base_pose"]["state"].append( + np.linalg.norm(obs["floating_base_pose"]) + ) + statistics["floating_base_vel"]["state"].append( + np.linalg.norm(obs["floating_base_vel"]) + ) + statistics["timestamp"].append(time.time()) + + if cmd == "CHECK" and np.linalg.norm(obs["floating_base_vel"]) > base_vel_thres: + print( + f" [{YELLOW_BOLD}WARNING{RESET}] robot is not stopped fully. " + f"Current base velocity: {np.linalg.norm(obs['floating_base_vel']):.3f} > {base_vel_thres:.3f}" + ) + # success_flag = 0 # robot is not stopped + + time.sleep(0.5) + + vel_cmd = self.cmd_to_velocity(cmd_list) + vel_cmd = [np.linalg.norm(v) for v in vel_cmd] + statistics["floating_base_vel"]["cmd"] = vel_cmd + return success_flag, statistics + + def _run_eef_track_test(self): + from decoupled_wbc.control.policy.lerobot_replay_policy import LerobotReplayPolicy + + parquet_path = ( + Path(__file__).parent.parent.parent.parent / "replay_data" / "g1_pnpbottle.parquet" + ) + replay_policy = LerobotReplayPolicy(parquet_path=str(parquet_path)) + + freq = 50 + rate = self.node.create_rate(freq) + + statistics = { + # "floating_base_pose": {"state": [], "cmd": []}, + "eef_base_pose": {"state": [], "cmd": []}, + "timestamp": [], + } + + for ii in range(500): + action = replay_policy.get_action() + action = replay_policy.action_to_cmd(action) + action["timestamp"] = time.monotonic() + action["target_time"] = time.monotonic() + ii / freq + self.control_publisher.publish(action) + obs = self.state_subscriber.get_msg() + if obs is None: + print("no obs...") + continue + gt_obs = replay_policy.get_observation() + + # statistics["floating_base_pose"]["state"].append(obs["floating_base_pose"]) + # 
statistics["floating_base_pose"]["cmd"].append(np.zeros_like(obs["floating_base_pose"])) + statistics["eef_base_pose"]["state"].append(obs["wrist_pose"]) + statistics["eef_base_pose"]["cmd"].append(gt_obs["wrist_pose"]) + statistics["timestamp"].append(time.time()) + + pos_err = np.linalg.norm(obs["wrist_pose"][:3] - gt_obs["wrist_pose"][:3]) + if pos_err > 1e-1: + print( + f" [{YELLOW_BOLD}WARNING{RESET}] robot failed to track the eef, " + f"error: {pos_err:.3f} ({self.test_mode})..." + ) + return 0, statistics + + if is_robot_fallen_from_quat(obs["torso_quat"]): + print("robot fallen...") + return 0, statistics + else: + rate.sleep() + + return 1, statistics + + def run(self): + self.warm_up() + + test_mode_to_func = { + "squat": self._run_squat_test, + "walk": self._run_walk_test, + "stop": self._run_stop_test, + "eef_track": self._run_eef_track_test, + } + + result, statistics = test_mode_to_func[self.test_mode]() + + self.post_process(statistics) + return result + + def _check_success_condition(self, obs): + if self.test_mode == "squat": + return obs["floating_base_pose"][2] < 0.4 + elif self.test_mode == "walk": + return np.linalg.norm(obs["floating_base_pose"][0:2]) > 1.0 + return False + + def activate(self): + self.keyboard_event_publisher.publish("]") + + def release(self): + self.keyboard_event_publisher.publish("9") + + def go_down(self): + self.keyboard_event_publisher.publish("2") + + def walk_forward(self): + self.keyboard_event_publisher.publish("w") + + def walk_stop(self): + self.keyboard_event_publisher.publish("z") + + def post_process(self, statistics): + if len(statistics) == 0: + return + + # plot the statistics + plot_keys = [key for key in statistics.keys() if key != "timestamp"] + viz = RerunViz( + image_keys=[], + tensor_keys=plot_keys, + window_size=10.0, + app_name=f"{self.test_mode}_test", + ) + + for ii in range(len(statistics[plot_keys[0]]["state"])): + tensor_data = {} + for k in plot_keys: + if "state" in statistics[k] and "cmd" in 
statistics[k]: + tensor_data[k] = np.array( + (statistics[k]["state"][ii], statistics[k]["cmd"][ii]) + ).reshape(2, -1) + else: + tensor_data[k] = np.asarray(statistics[k]["state"][ii]).reshape(1, -1) + viz.plot_tensors( + tensor_data, + statistics["timestamp"][ii], + ) + + if self.test_mode == "stop": + base_velocity = statistics["floating_base_vel"]["state"] + base_velocity_cmd = statistics["floating_base_vel"]["cmd"] + + base_velocity_tracking_err = [] + for v_cmd, v in zip(base_velocity_cmd, base_velocity): # TODO: check if this is correct + if v_cmd.max() < 1e-4: + base_velocity_tracking_err.append(v) + print( + f" [{GREEN_BOLD}INFO{RESET}] Base velocity tracking when stopped: " + f"{np.mean(base_velocity_tracking_err):.3f}" + ) + + if self.test_mode == "eef_track": + eef_pose = statistics["eef_base_pose"]["state"] + eef_pose_cmd = statistics["eef_base_pose"]["cmd"] + eef_pose_tracking_err = [] + for p_cmd, p in zip(eef_pose_cmd, eef_pose): + eef_pose_tracking_err.append(np.linalg.norm(p - p_cmd)) + print( + f" [{GREEN_BOLD}INFO{RESET}] Eef pose tracking error: {np.mean(eef_pose_tracking_err):.3f}" + ) + + def shutdown(self): + self._stop_event.set() + self.spin_thread.join() + del self.state_subscriber + del self.keyboard_event_publisher + # Don't shutdown ROS between tests - let pytest handle it + + +def start_g1_control_loop(): + proc = subprocess.Popen( + [ + "python3", + "decoupled_wbc/control/main/teleop/run_g1_control_loop.py", + "--keyboard_dispatcher_type", + "ros", + "--enable-offscreen", + ], + preexec_fn=os.setsid, + ) + time.sleep(10) + return proc + + +def run_test(test_mode: str): + """Run a single test with the specified mode.""" + proc = start_g1_control_loop() + print(f"G1 control loop started for {test_mode} test...") + + test = LocomotionRunner(test_mode) + result = test.run() + + print("Shutting down...") + test.shutdown() + proc.send_signal(signal.SIGKILL) + proc.wait() + + return result + + +def test_squat(): + """Pytest function for 
squat test.""" + result = run_test("squat") + assert result == 1, "Squat test failed - robot either fell or didn't reach target height" + + +def test_walk(): + """Pytest function for walk test.""" + result = run_test("walk") + assert result == 1, "Walk test failed - robot either fell or didn't reach target distance" + + +@pytest.mark.skip(reason="skipping test for now, cicd test always gets killed") +def test_stop(): + """Pytest function for walking to a nearby position and stop test.""" + result = run_test("stop") + assert result == 1, "Stop test failed - robot either fell or didn't reach target distance" + + +@pytest.mark.skip(reason="skipping test for now, cicd test always gets killed") +def test_eef_track(): + """Pytest function for eef track test.""" + result = run_test("eef_track") + assert result == 1, "Eef track test failed - robot either fell or didn't reach target distance" + + +def main(): + parser = argparse.ArgumentParser(description="Run locomotion tests") + parser.add_argument("--squat", action="store_true", help="Run squat test only") + parser.add_argument("--walk", action="store_true", help="Run walk test only") + parser.add_argument("--stop", action="store_true", help="Run stop test only") + parser.add_argument("--eef_track", action="store_true", help="Run eef track test only") + + args = parser.parse_args() + + if args.squat and args.walk: + print("Error: Cannot specify both --squat and --walk") + return 1 + + if args.squat: + print("Running squat test only...") + result = run_test("squat") + if result == 1: + print("✓ Squat test PASSED") + return 0 + else: + print("✗ Squat test FAILED") + return 1 + + elif args.walk: + print("Running walk test only...") + result = run_test("walk") + if result == 1: + print("✓ Walk test PASSED") + return 0 + else: + print("✗ Walk test FAILED") + return 1 + + elif args.stop: + print("Running stop test only...") + result = run_test("stop") + if result == 1: + print("✓ Stop test PASSED") + return 0 + else: + 
print("✗ Stop test FAILED") + return 1 + + elif args.eef_track: + print("Running eef track test only...") + result = run_test("eef_track") + if result == 1: + print("✓ Eef track test PASSED") + return 0 + else: + print("✗ Eef track test FAILED") + return 1 + + else: + print("Running both tests...") + squat_result = run_test("squat") + walk_result = run_test("walk") + + if squat_result == 1 and walk_result == 1: + print("✓ All tests PASSED") + return 0 + else: + print( + f"✗ Test results: squat={'PASSED' if squat_result == 1 else 'FAILED'}, " + f"walk={'PASSED' if walk_result == 1 else 'FAILED'}" + ) + return 1 + + +if __name__ == "__main__": + exit(main()) diff --git a/decoupled_wbc/tests/control/main/test_data_exporter_loop.py b/decoupled_wbc/tests/control/main/test_data_exporter_loop.py new file mode 100644 index 0000000..8277473 --- /dev/null +++ b/decoupled_wbc/tests/control/main/test_data_exporter_loop.py @@ -0,0 +1,403 @@ +import glob +import os +import tempfile +from unittest.mock import MagicMock, patch + +import numpy as np +import pytest + +try: + from decoupled_wbc.control.main.teleop.run_g1_data_exporter import Gr00tDataCollector + from decoupled_wbc.control.robot_model.instantiation.g1 import instantiate_g1_robot_model + from decoupled_wbc.data.constants import RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH + from decoupled_wbc.data.exporter import Gr00tDataExporter + from decoupled_wbc.data.utils import get_dataset_features +except ModuleNotFoundError as e: + if "No module named 'rclpy'" in str(e): + pytestmark = pytest.mark.skip(reason="ROS (rclpy) is not installed") + else: + raise e + + +import json + +# How does mocking ROS work? +# +# This test file uses mocking to simulate a ROS environment without requiring actual ROS hardware: +# +# 1. ros_ok_side_effect: Controls how long the ROS loop runs by returning a sequence of +# True/False values. [True, True, False] means "run for 2 iterations then stop" +# +# 2. 
MockROSMsgSubscriber: Simulates sensors (camera/state) by returning pre-defined data: +# +# 3. MockKeyboardListenerSubscriber: Simulates user input: +# - 'c' = start/stop recording +# - 'd' = discard episode +# - KeyboardInterrupt = simulate Ctrl+C +# - None = no input +# +# 4. MockROSEnvironment: A context manager that patches all ROS dependencies to use our mocks, +# allowing us to test ROS-dependent code without actual ROS running. + + +class MockROSMsgSubscriber: + def __init__(self, return_value: list[dict]): + self.return_value = return_value + self.counter = 0 + + def get_image(self): + if self.counter < len(self.return_value): + self.counter += 1 + return self.return_value[self.counter - 1] + else: + return None + + def get_msg(self): + if self.counter < len(self.return_value): + self.counter += 1 + return self.return_value[self.counter - 1] + else: + return None + + +class MockKeyboardListenerSubscriber: + def __init__(self, return_value: list[str]): + self.return_value = return_value + self.counter = 0 + + def get_keyboard_input(self): + return self.return_value[self.counter] + + def read_msg(self): + if self.counter < len(self.return_value): + result = self.return_value[self.counter] + if isinstance(result, KeyboardInterrupt): + raise result + self.counter += 1 + return result + return None + + +class MockROSEnvironment: + """Context manager for mocking ROS environment and subscribers.""" + + def __init__(self, ok_side_effect, keyboard_listener, img_subscriber, state_subscriber): + self.ok_side_effect = ok_side_effect + self.keyboard_listener = keyboard_listener + self.img_subscriber = img_subscriber + self.state_subscriber = state_subscriber + self.patches = [] + + def __enter__(self): + self.patches = [ + patch("rclpy.init"), + patch("rclpy.create_node"), + patch("rclpy.spin"), + patch("rclpy.ok", side_effect=self.ok_side_effect), + patch("rclpy.shutdown"), + patch( + "decoupled_wbc.control.main.teleop.run_g1_data_exporter.KeyboardListenerSubscriber", 
+ return_value=self.keyboard_listener, + ), + patch( + "decoupled_wbc.control.main.teleop.run_g1_data_exporter.ROSImgMsgSubscriber", + return_value=self.img_subscriber, + ), + patch( + "decoupled_wbc.control.main.teleop.run_g1_data_exporter.ROSMsgSubscriber", + return_value=self.state_subscriber, + ), + ] + + for p in self.patches: + p.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + for p in reversed(self.patches): + p.stop() + return False + + +def verify_parquet_files_exist(file_path: str, num_episodes: int): + parquet_files = glob.glob(os.path.join(file_path, "data/chunk-*/episode_*.parquet")) + assert ( + len(parquet_files) == num_episodes + ), f"Expected {num_episodes} parquet files, but found {len(parquet_files)}" + + +def verify_video_files_exist(file_path: str, observation_keys: list[str], num_episodes: int): + for observation_key in observation_keys: + video_files = glob.glob( + os.path.join(file_path, f"videos/chunk-*/{observation_key}/episode_*.mp4") + ) + assert ( + len(video_files) == num_episodes + ), f"Expected {num_episodes} video files, but found {len(video_files)}" + + +def verify_metadata_files(file_path: str): + files_to_check = ["episodes.jsonl", "info.json", "tasks.jsonl", "modality.json"] + for file in files_to_check: + assert os.path.exists(os.path.join(file_path, "meta", file)), f"meta/{file} not created" + + +@pytest.fixture +def lerobot_features(): + robot_model = instantiate_g1_robot_model() + return get_dataset_features(robot_model) + + +@pytest.fixture +def modality_config(): + return { + "state": {"feature1": {"start": 0, "end": 4}, "feature2": {"start": 4, "end": 9}}, + "action": {"feature1": {"start": 0, "end": 4}, "feature2": {"start": 4, "end": 9}}, + "video": {"rs_view": {"original_key": "observation.images.ego_view"}}, + "annotation": {"human.task_description": {"original_key": "task_index"}}, + } + + +def _get_image_stream_data(episode_length: int, frame_rate: int, img_height: int, img_width: int): + 
return [ + { + "image": np.zeros((img_height, img_width, 3), dtype=np.uint8), + "timestamp": (i * 1 / frame_rate), + } + for i in range(episode_length) + ] + + +def _get_state_act_stream_data( + episode_length: int, frame_rate: int, state_dim: int, action_dim: int +): + return [ + { + "q": np.zeros(state_dim), + "action": np.zeros(action_dim), + "timestamp": (i * 1 / frame_rate), + "navigate_command": np.zeros(3, dtype=np.float64), + "base_height_command": 0.0, + "wrist_pose": np.zeros(14, dtype=np.float64), + "action.eef": np.zeros(14, dtype=np.float64), + } + for i in range(episode_length) + ] + + +def test_control_loop_happy_path_workflow(lerobot_features, modality_config): + """ + This test records a single episode and saves it to disk. + """ + episode_length = 10 + frame_rate = 20 + img_stream_data = _get_image_stream_data( + episode_length, frame_rate, RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH + ) + robot_model = instantiate_g1_robot_model() + state_act_stream_data = _get_state_act_stream_data( + episode_length, frame_rate, robot_model.num_joints, robot_model.num_joints + ) + + keyboard_sub_output = [None for _ in range(episode_length)] + keyboard_sub_output[0] = "c" # Start recording + keyboard_sub_output[-1] = "c" # Stop recording and save + + # --------- Save the first episode --------- + mock_img_sub = MockROSMsgSubscriber(img_stream_data) + mock_state_sub = MockROSMsgSubscriber(state_act_stream_data) + mock_keyboard_listner = MockKeyboardListenerSubscriber(keyboard_sub_output) + + with tempfile.TemporaryDirectory() as temp_dir: + dataset_dir = os.path.join(temp_dir, "dataset") + + data_exporter = Gr00tDataExporter.create( + save_root=dataset_dir, + fps=frame_rate, + features=lerobot_features, + modality_config=modality_config, + task="test", + ) + + ros_ok_side_effect = [True] * (episode_length + 1) + [False] + with MockROSEnvironment( + ros_ok_side_effect, mock_keyboard_listner, mock_img_sub, mock_state_sub + ): + data_collector = Gr00tDataCollector( 
+ camera_topic_name="mock_camera_topic", + state_topic_name="mock_state_topic", + data_exporter=data_exporter, + frequency=frame_rate, + ) + + # mocking to avoid actual sleeping + data_collector.rate = MagicMock() + + data_collector.run() + + verify_parquet_files_exist(dataset_dir, 1) + verify_video_files_exist(dataset_dir, data_exporter.meta.video_keys, 1) + verify_metadata_files(dataset_dir) + + # --------- Save the second episode --------- + # we reset the mock subscribers and re-run the control loop + # This imitates the case where the user starts recording a new episode on an existing dataset + mock_img_sub = MockROSMsgSubscriber(img_stream_data) + mock_state_sub = MockROSMsgSubscriber(state_act_stream_data) + ros_ok_side_effect = [True] * (episode_length + 1) + [False] + mock_keyboard_listner = MockKeyboardListenerSubscriber(keyboard_sub_output) + with MockROSEnvironment( + ros_ok_side_effect, mock_keyboard_listner, mock_img_sub, mock_state_sub + ): + data_collector = Gr00tDataCollector( + camera_topic_name="mock_camera_topic", + state_topic_name="mock_state_topic", + data_exporter=data_exporter, + frequency=frame_rate, + ) + + # mocking to avoid actual sleeping + data_collector.rate = MagicMock() + + data_collector.run() + + # now there should be 2 episodes in the dataset + verify_parquet_files_exist(dataset_dir, 2) + verify_video_files_exist(dataset_dir, data_exporter.meta.video_keys, 2) + verify_metadata_files(dataset_dir) + + +def test_control_loop_keyboard_interrupt_workflow(lerobot_features, modality_config): + """ + This test simulates a keyboard interruption in the middle of recording. 
+ Expected behavior: + - The episode is saved to disk + - The episode is marked as discarded + """ + episode_length = 15 + frame_rate = 20 + img_stream_data = _get_image_stream_data( + episode_length, frame_rate, RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH + ) + robot_model = instantiate_g1_robot_model() + state_act_stream_data = _get_state_act_stream_data( + episode_length, frame_rate, robot_model.num_joints, robot_model.num_joints + ) + + keyboard_sub_output = [None for _ in range(episode_length)] + keyboard_sub_output[0] = "c" # Start recording + keyboard_sub_output[5] = KeyboardInterrupt() # keyboard interruption in the middle of recording + + mock_img_sub = MockROSMsgSubscriber(img_stream_data) + mock_state_sub = MockROSMsgSubscriber(state_act_stream_data) + mock_keyboard_listener = MockKeyboardListenerSubscriber(keyboard_sub_output) + + with tempfile.TemporaryDirectory() as temp_dir: + dataset_dir = os.path.join(temp_dir, "dataset") + + data_exporter = Gr00tDataExporter.create( + save_root=dataset_dir, + fps=frame_rate, + features=lerobot_features, + modality_config=modality_config, + task="test", + ) + + ros_ok_side_effect = [True] * episode_length + [False] + with MockROSEnvironment( + ros_ok_side_effect, mock_keyboard_listener, mock_img_sub, mock_state_sub + ): + data_collector = Gr00tDataCollector( + camera_topic_name="mock_camera_topic", + state_topic_name="mock_state_topic", + data_exporter=data_exporter, + frequency=frame_rate, + ) + + data_collector.rate = MagicMock() + # try: + data_collector.run() + # except KeyboardInterrupt: + # pass + + verify_parquet_files_exist(dataset_dir, 1) + verify_video_files_exist(dataset_dir, data_exporter.meta.video_keys, 1) + verify_metadata_files(dataset_dir) + + # verify that the episode is marked as discarded + ep_info = json.load(open(os.path.join(dataset_dir, "meta", "info.json"))) + assert ep_info["discarded_episode_indices"][0] == 0 + assert ep_info["total_frames"] == 5 + assert ep_info["total_episodes"] == 1 + 
+ +def test_discarded_episode_workflow(lerobot_features, modality_config): + """ + This test simulates a case where the user discards an episode in the middle of recording. + Expected behavior: + - Record 3 episodes, discard episode 0 and 2 + - There should be 3 episodes saved to disk + - Episode 0 and 2 should be flagged as discarded + """ + episode_length = 17 + frame_rate = 20 + robot_model = instantiate_g1_robot_model() + state_dim = robot_model.num_joints + action_dim = robot_model.num_joints + img_stream_data = _get_image_stream_data( + episode_length, frame_rate, RS_VIEW_CAMERA_HEIGHT, RS_VIEW_CAMERA_WIDTH + ) + state_act_stream_data = _get_state_act_stream_data( + episode_length, frame_rate, state_dim, action_dim + ) + + keyboard_sub_output = [None for _ in range(episode_length)] + keyboard_sub_output[0] = "c" # Start recording episode index 0 + keyboard_sub_output[5] = "x" # Discard episode index 0 + keyboard_sub_output[7] = "c" # Start recording episode index 1 + keyboard_sub_output[10] = "c" # stop recording and save episode index 1 + keyboard_sub_output[12] = "c" # start recording episode index 2 + keyboard_sub_output[15] = "x" # discard episode index 2 + + mock_img_sub = MockROSMsgSubscriber(img_stream_data) + mock_state_sub = MockROSMsgSubscriber(state_act_stream_data) + mock_keyboard_listener = MockKeyboardListenerSubscriber(keyboard_sub_output) + + with tempfile.TemporaryDirectory() as temp_dir: + dataset_dir = os.path.join(temp_dir, "dataset") + + data_exporter = Gr00tDataExporter.create( + save_root=dataset_dir, + fps=frame_rate, + features=lerobot_features, + modality_config=modality_config, + task="test", + ) + + ros_ok_side_effect = [True] * episode_length + [False] + with MockROSEnvironment( + ros_ok_side_effect, mock_keyboard_listener, mock_img_sub, mock_state_sub + ): + data_collector = Gr00tDataCollector( + camera_topic_name="mock_camera_topic", + state_topic_name="mock_state_topic", + data_exporter=data_exporter, + frequency=frame_rate, + 
) + + data_collector.rate = MagicMock() + try: + data_collector.run() + except Exception: + pass + + # verify if the episode is marked as discarded + ep_info = json.load(open(os.path.join(dataset_dir, "meta", "info.json"))) + assert len(ep_info["discarded_episode_indices"]) == 2 + assert ep_info["discarded_episode_indices"][0] == 0 + assert ep_info["discarded_episode_indices"][1] == 2 + + # verify that all episodes are saved regardless of being discarded + verify_parquet_files_exist(dataset_dir, 3) + verify_video_files_exist(dataset_dir, data_exporter.meta.video_keys, 3) + verify_metadata_files(dataset_dir) diff --git a/tests/control/policy/__init__.py b/decoupled_wbc/tests/control/policy/__init__.py similarity index 100% rename from tests/control/policy/__init__.py rename to decoupled_wbc/tests/control/policy/__init__.py diff --git a/tests/control/policy/interpolation_policy/__init__.py b/decoupled_wbc/tests/control/policy/interpolation_policy/__init__.py similarity index 100% rename from tests/control/policy/interpolation_policy/__init__.py rename to decoupled_wbc/tests/control/policy/interpolation_policy/__init__.py diff --git a/decoupled_wbc/tests/control/policy/interpolation_policy/test_interpolation_policy.py b/decoupled_wbc/tests/control/policy/interpolation_policy/test_interpolation_policy.py new file mode 100644 index 0000000..bb5cbf0 --- /dev/null +++ b/decoupled_wbc/tests/control/policy/interpolation_policy/test_interpolation_policy.py @@ -0,0 +1,47 @@ +from pathlib import Path +import pickle + +import numpy as np +import pytest + +from decoupled_wbc.control.policy.interpolation_policy import ( + InterpolationPolicy, +) + + +def get_test_data_path(filename: str) -> str: + """Get the absolute path to a test data file.""" + test_dir = Path(__file__).parent + return str(test_dir / ".." / ".." / ".." 
/ "replay_data" / filename) + + +@pytest.fixture +def logged_data(): + """Load the logged data from file.""" + data_path = get_test_data_path("interpolation_data.pkl") + with open(data_path, "rb") as f: + return pickle.load(f) + + +def test_replay_logged_data(logged_data): + """Test that the wrapper produces the same pose commands as logged data.""" + init_args = logged_data["init_args"] + interp = InterpolationPolicy( + init_time=init_args["curr_t"], + init_values={"target_pose": init_args["curr_pose"]}, + max_change_rate=np.inf, + ) + + # Test all data points including the first one + for c in logged_data["calls"]: + # Get the action from wrapper + if c["type"] == "get_action": + action = interp.get_action(**c["args"]) + expected_action = c["result"] + np.testing.assert_allclose( + action["target_pose"], expected_action["q"], rtol=1e-9, atol=1e-9 + ) + # print(action, expected_action) + + else: + interp.set_goal(**c["args"]) diff --git a/decoupled_wbc/tests/control/policy/interpolation_policy/test_interpolation_ramp_up.py b/decoupled_wbc/tests/control/policy/interpolation_policy/test_interpolation_ramp_up.py new file mode 100644 index 0000000..2c35e72 --- /dev/null +++ b/decoupled_wbc/tests/control/policy/interpolation_policy/test_interpolation_ramp_up.py @@ -0,0 +1,78 @@ +import numpy as np +import pytest + +from decoupled_wbc.control.policy.interpolation_policy import ( + InterpolationPolicy, +) + + +def test_trajectory_interpolation(): + """ + Test that the InterpolationPolicy correctly interpolates between waypoints. + + Initial pose is at all zeros. + At t=4sec, the index 27 position (right_shoulder_yaw_joint) should be -1.5. + We run at 100Hz to see all intermediate waypoints. 
+ + Notes: + - The trajectory data is at 'trajectory_data.npy' in the current directory + - The visualization is at 'trajectory.png' in the current directory + """ + # Create a pose with 32 joints (all zeros initially) + num_joints = 32 + initial_pose = np.zeros(num_joints) + + # Initial time (use a fixed value for reproducibility) + initial_time = 0.0 + + # Create the wrapper with initial pose + interpolator = InterpolationPolicy( + init_time=initial_time, + init_values={"target_pose": initial_pose}, + max_change_rate=np.inf, + ) + + # Target pose: all zeros except index 27 which should be -1.5 + target_pose = np.zeros(num_joints) + target_pose[27] = -1.5 # right_shoulder_yaw_joint + target_time = 4.0 # 4 seconds from now + + # Set the planner command to schedule the waypoint + interpolator.set_goal( + { + "target_pose": target_pose, + "target_time": target_time, + "interpolation_garbage_collection_time": initial_time, + } + ) + + # Sample the trajectory at 100Hz + frequency = 100 + dt = 1.0 / frequency + sample_times = np.arange(initial_time, target_time + dt, dt) + + # Collect the interpolated poses + sampled_poses = [] + for t in sample_times: + action = interpolator.get_action(t) + sampled_poses.append(action["target_pose"]) + + # Convert to numpy array for easier analysis + sampled_poses = np.array(sampled_poses) + + # Check specific requirements + # Verify we actually moved from 0 to -1.5 + joint_27_positions = sampled_poses[:, 27] + assert joint_27_positions[0] == pytest.approx(0.0) + assert joint_27_positions[-1] == pytest.approx(-1.5) + + # Calculate the absolute changes between each step + changes = np.abs(np.diff(joint_27_positions)) + assert np.all(changes < 0.004), "Joint 27 position should change by less than 0.004" + + # Print some statistics about the trajectory + print(f"Total time steps: {len(sample_times)}") + print(f"Joint 27 trajectory start: {joint_27_positions[0]}") + print(f"Joint 27 trajectory end: {joint_27_positions[-1]}") + 
print(f"Joint 27 max velocity: {np.max(np.abs(np.diff(joint_27_positions) / dt))}") + print(f"Max velocity timestep: {np.argmax(np.abs(np.diff(joint_27_positions) / dt))}") diff --git a/tests/control/policy/interpolation_policy/trajectory.png b/decoupled_wbc/tests/control/policy/interpolation_policy/trajectory.png similarity index 100% rename from tests/control/policy/interpolation_policy/trajectory.png rename to decoupled_wbc/tests/control/policy/interpolation_policy/trajectory.png diff --git a/tests/control/policy/interpolation_policy/trajectory_data.npy b/decoupled_wbc/tests/control/policy/interpolation_policy/trajectory_data.npy similarity index 100% rename from tests/control/policy/interpolation_policy/trajectory_data.npy rename to decoupled_wbc/tests/control/policy/interpolation_policy/trajectory_data.npy diff --git a/tests/control/robot_model/__init__.py b/decoupled_wbc/tests/control/robot_model/__init__.py similarity index 100% rename from tests/control/robot_model/__init__.py rename to decoupled_wbc/tests/control/robot_model/__init__.py diff --git a/decoupled_wbc/tests/control/robot_model/robot_model_test.py b/decoupled_wbc/tests/control/robot_model/robot_model_test.py new file mode 100644 index 0000000..ac1aa8a --- /dev/null +++ b/decoupled_wbc/tests/control/robot_model/robot_model_test.py @@ -0,0 +1,911 @@ +# test_robot_model.py + +import numpy as np +import pinocchio as pin +import pytest + +from decoupled_wbc.control.robot_model import ReducedRobotModel +from decoupled_wbc.control.robot_model.instantiation.g1 import instantiate_g1_robot_model + + +@pytest.fixture +def g1_robot_model(): + """ + Fixture that creates and returns a G1 RobotModel instance. + """ + return instantiate_g1_robot_model() + + +def test_robot_model_initialization(g1_robot_model): + """ + Test initialization of the RobotModel and its main attributes. 
+ """ + for robot_model in [g1_robot_model]: + # Check that the Pinocchio wrapper exists + assert robot_model.pinocchio_wrapper is not None + + # Check number of degrees of freedom (nq) + assert robot_model.num_dofs > 0 + + # Check we have the expected number of joints beyond the floating base + assert len(robot_model.joint_names) > 0 + + # Check that supplemental info is present + assert robot_model.supplemental_info is not None + + +def test_robot_model_joint_names(g1_robot_model): + """ + Test that joint_names is populated correctly + and that dof_index works. + """ + for robot_model in [g1_robot_model]: + # Extract joint names + joint_names = robot_model.joint_names + + # Pick the first joint name and get its index + first_joint_name = joint_names[0] + idx = robot_model.dof_index(first_joint_name) + assert idx >= 0 + + # Test that an unknown joint name raises an error + with pytest.raises(ValueError, match="Unknown joint name"): + _ = robot_model.dof_index("non_existent_joint") + + +def test_robot_model_forward_kinematics_valid_q(g1_robot_model): + """ + Test that cache_forward_kinematics works with a valid q. + """ + for robot_model in [g1_robot_model]: + nq = robot_model.num_dofs + + # Construct a valid configuration (e.g., zero vector) + q_valid = np.zeros(nq) + + # Should not raise any exception + robot_model.cache_forward_kinematics(q_valid) + + +def test_robot_model_forward_kinematics_invalid_q(g1_robot_model): + """ + Test that cache_forward_kinematics raises an error with an invalid q. + """ + for robot_model in [g1_robot_model]: + nq = robot_model.num_dofs + + # Construct an invalid configuration (wrong size) + q_invalid = np.zeros(nq + 1) + + with pytest.raises(ValueError, match="Expected q of length"): + robot_model.cache_forward_kinematics(q_invalid) + + +def test_robot_model_frame_placement(g1_robot_model): + """ + Test the frame_placement method with a valid and invalid frame name. 
+ Also test that frame placements change with different configurations. + """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing") + + # Use the hand frame from supplemental info + test_frame = robot_model.supplemental_info.hand_frame_names["left"] + + # Test with zero configuration + q_zero = np.zeros(robot_model.num_dofs) + robot_model.cache_forward_kinematics(q_zero) + placement_zero = robot_model.frame_placement(test_frame) + assert isinstance(placement_zero, pin.SE3) + + # Test with non-zero configuration + q_non_zero = np.zeros(robot_model.num_dofs) + root_nq = 7 if robot_model.is_floating_base_model else 0 + + # Set a more significant configuration change + # Use π/2 for all joints to create a more noticeable difference + q_non_zero[root_nq:] = np.pi / 2 # 90 degrees for all joints + + robot_model.cache_forward_kinematics(q_non_zero) + placement_non_zero = robot_model.frame_placement(test_frame) + + # Verify that frame placements are different with different configurations + assert not np.allclose( + placement_zero.translation, placement_non_zero.translation + ) or not np.allclose(placement_zero.rotation, placement_non_zero.rotation) + + # Should raise an error for an invalid frame + with pytest.raises(ValueError, match="Unknown frame"): + robot_model.frame_placement("non_existent_frame") + + +# Tests for ReducedRobotModel +def test_reduced_robot_model_initialization(g1_robot_model): + """ + Test initialization of the ReducedRobotModel. 
+ """ + for robot_model in [g1_robot_model]: + # Create a reduced model by fixing some actual joints from the robot + fixed_joints = robot_model.joint_names[:2] # Use first two joints from the robot + reduced_robot = ReducedRobotModel(robot_model, fixed_joints) + + # Check that the full robot is stored + assert reduced_robot.full_robot is robot_model + + # Check that fixed joints are stored correctly + assert reduced_robot.fixed_joints == fixed_joints + assert len(reduced_robot.fixed_values) == len(fixed_joints) + + # Check that the number of dofs is reduced + assert reduced_robot.num_dofs == robot_model.num_dofs - len(fixed_joints) + + +def test_reduced_robot_model_joint_names(g1_robot_model): + """ + Test that joint_names in ReducedRobotModel excludes fixed joints. + """ + for robot_model in [g1_robot_model]: + # Use actual joints from the robot + fixed_joints = robot_model.joint_names[:2] # Use first two joints from the robot + reduced_robot = ReducedRobotModel(robot_model, fixed_joints) + + # Check that fixed joints are not in the reduced model's joint names + for joint in fixed_joints: + assert joint not in reduced_robot.joint_names + + # Check that other joints are still present + for joint in robot_model.joint_names: + if joint not in fixed_joints: + assert joint in reduced_robot.joint_names + + +def test_reduced_robot_model_configuration_conversion(g1_robot_model): + """ + Test conversion between reduced and full configurations. 
+ """ + for robot_model in [g1_robot_model]: + # Use actual joints from the robot + fixed_joints = robot_model.joint_names[:2] # Use first two joints from the robot + fixed_values = [0.5, 1.0] + reduced_robot = ReducedRobotModel(robot_model, fixed_joints, fixed_values) + + # Create a reduced configuration + q_reduced = np.zeros(reduced_robot.num_dofs) + q_reduced[0] = 0.3 # Set some value for testing + + # Convert to full configuration + q_full = reduced_robot.reduced_to_full_configuration(q_reduced) + + # Check that fixed joints have the correct values + for joint_name, value in zip(fixed_joints, fixed_values): + full_idx = robot_model.dof_index(joint_name) + assert q_full[full_idx] == value + + # Convert back to reduced configuration + q_reduced_back = reduced_robot.full_to_reduced_configuration(q_full) + + # Check that the conversion is reversible + np.testing.assert_array_almost_equal(q_reduced, q_reduced_back) + + +def test_reduced_robot_model_forward_kinematics(g1_robot_model): + """ + Test forward kinematics with the reduced model. + """ + for robot_model in [g1_robot_model]: + # Use actual joints from the robot + fixed_joints = robot_model.joint_names[:2] # Use first two joints from the robot + reduced_robot = ReducedRobotModel(robot_model, fixed_joints) + + # Create a reduced configuration + q_reduced = np.zeros(reduced_robot.num_dofs) + + # Should not raise any exception + reduced_robot.cache_forward_kinematics(q_reduced) + + # Check that frame placement works + model = robot_model.pinocchio_wrapper.model + if len(model.frames) > 1: + valid_frame = model.frames[1].name + placement = reduced_robot.frame_placement(valid_frame) + assert isinstance(placement, pin.SE3) + + +def test_robot_model_clip_configuration(g1_robot_model): + """ + Test that clip_configuration properly clips values to joint limits. 
+ """ + for robot_model in [g1_robot_model]: + # Create a configuration with some values outside limits + q = np.zeros(robot_model.num_dofs) + root_nq = 7 if robot_model.is_floating_base_model else 0 + # Create extreme values for all joints + q[root_nq:] = np.array([100.0, -100.0, 50.0, -50.0] * (robot_model.num_joints // 4 + 1))[ + : robot_model.num_joints + ] + + # Clip the configuration + q_clipped = robot_model.clip_configuration(q) + + # Check that values are within limits + assert np.all(q_clipped[root_nq:] <= robot_model.upper_joint_limits) + assert np.all(q_clipped[root_nq:] >= robot_model.lower_joint_limits) + + +def test_robot_model_get_actuated_joints(g1_robot_model): + """ + Test getting body and hand actuated joints from configuration. + """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing actuated joints") + + # Create a test configuration + q = np.zeros(robot_model.num_dofs) + root_nq = 7 if robot_model.is_floating_base_model else 0 + q[root_nq:] = np.arange(robot_model.num_joints) # Set some values for joints + + # Test body actuated joints + body_joints = robot_model.get_body_actuated_joints(q) + assert len(body_joints) == len(robot_model.get_body_actuated_joint_indices()) + + # Test hand actuated joints + hand_joints = robot_model.get_hand_actuated_joints(q) + assert len(hand_joints) == len(robot_model.get_hand_actuated_joint_indices()) + + # Test left hand joints + left_hand_joints = robot_model.get_hand_actuated_joints(q, side="left") + assert len(left_hand_joints) == len(robot_model.get_hand_actuated_joint_indices("left")) + + # Test right hand joints + right_hand_joints = robot_model.get_hand_actuated_joints(q, side="right") + assert len(right_hand_joints) == len(robot_model.get_hand_actuated_joint_indices("right")) + + +def test_robot_model_get_configuration_from_actuated_joints(g1_robot_model): + """ + Test creating 
full configuration from actuated joint values. + """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing actuated joints") + + # Create test values for body and hands + body_values = np.ones(len(robot_model.get_body_actuated_joint_indices())) + hand_values = np.ones(len(robot_model.get_hand_actuated_joint_indices())) + left_hand_values = np.ones(len(robot_model.get_hand_actuated_joint_indices("left"))) + right_hand_values = np.ones(len(robot_model.get_hand_actuated_joint_indices("right"))) + + # Test with combined hand values + q = robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_values, hand_actuated_joint_values=hand_values + ) + assert q.shape == (robot_model.num_dofs,) + + # Test with separate hand values + q = robot_model.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_values, + left_hand_actuated_joint_values=left_hand_values, + right_hand_actuated_joint_values=right_hand_values, + ) + assert q.shape == (robot_model.num_dofs,) + + +def test_robot_model_reset_forward_kinematics(g1_robot_model): + """ + Test resetting forward kinematics to default configuration. 
+ """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing") + + # Create a more significant configuration change + q = np.zeros(robot_model.num_dofs) + root_nq = 7 if robot_model.is_floating_base_model else 0 + # Set some extreme joint angles + q[root_nq:] = np.pi / 2 # 90 degrees for all joints + robot_model.cache_forward_kinematics(q) + + # Use a hand frame from supplemental info + test_frame = robot_model.supplemental_info.hand_frame_names["left"] + + # Reset to default + robot_model.reset_forward_kinematics() + # Get frame placement after reset + placement_default = robot_model.frame_placement(test_frame) + + # Check that frame placement matches what we get with q_zero + robot_model.cache_forward_kinematics(robot_model.q_zero) + placement_q_zero = robot_model.frame_placement(test_frame) + np.testing.assert_array_almost_equal( + placement_default.translation, placement_q_zero.translation + ) + np.testing.assert_array_almost_equal(placement_default.rotation, placement_q_zero.rotation) + + +# Additional tests for ReducedRobotModel +def test_reduced_robot_model_clip_configuration(g1_robot_model): + """ + Test that clip_configuration works in reduced space. 
+ """ + for robot_model in [g1_robot_model]: + fixed_joints = robot_model.joint_names[:2] + reduced_robot = ReducedRobotModel(robot_model, fixed_joints) + + # Create a configuration with some values outside limits + q_reduced = np.zeros(reduced_robot.num_dofs) + root_nq = 7 if reduced_robot.full_robot.is_floating_base_model else 0 + # Create extreme values for all joints + q_reduced[root_nq:] = np.array( + [100.0, -100.0, 50.0, -50.0] * (reduced_robot.num_joints // 4 + 1) + )[: reduced_robot.num_joints] + + # Clip the configuration + q_clipped = reduced_robot.clip_configuration(q_reduced) + + # Check that values are within limits + assert np.all(q_clipped[root_nq:] <= reduced_robot.upper_joint_limits) + assert np.all(q_clipped[root_nq:] >= reduced_robot.lower_joint_limits) + + +def test_reduced_robot_model_get_actuated_joints(g1_robot_model): + """ + Test getting body and hand actuated joints from reduced configuration. + """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing actuated joints") + + fixed_joints = robot_model.joint_names[:2] + reduced_robot = ReducedRobotModel(robot_model, fixed_joints) + + # Create a test configuration + q_reduced = np.zeros(reduced_robot.num_dofs) + root_nq = 7 if reduced_robot.full_robot.is_floating_base_model else 0 + q_reduced[root_nq:] = np.arange(reduced_robot.num_joints) + + # Test body actuated joints + body_joints = reduced_robot.get_body_actuated_joints(q_reduced) + assert len(body_joints) == len(reduced_robot.get_body_actuated_joint_indices()) + + # Test hand actuated joints + hand_joints = reduced_robot.get_hand_actuated_joints(q_reduced) + assert len(hand_joints) == len(reduced_robot.get_hand_actuated_joint_indices()) + + +def test_reduced_robot_model_get_configuration_from_actuated_joints(g1_robot_model): + """ + Test creating reduced configuration from actuated joint values. 
+ """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing actuated joints") + + fixed_joints = robot_model.joint_names[:2] + reduced_robot = ReducedRobotModel(robot_model, fixed_joints) + + # Create test values for body and hands + body_values = np.ones(len(reduced_robot.get_body_actuated_joint_indices())) + hand_values = np.ones(len(reduced_robot.get_hand_actuated_joint_indices())) + left_hand_values = np.ones(len(reduced_robot.get_hand_actuated_joint_indices("left"))) + right_hand_values = np.ones(len(reduced_robot.get_hand_actuated_joint_indices("right"))) + + # Test with combined hand values + q_reduced = reduced_robot.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_values, hand_actuated_joint_values=hand_values + ) + assert q_reduced.shape == (reduced_robot.num_dofs,) + + # Test with separate hand values + q_reduced = reduced_robot.get_configuration_from_actuated_joints( + body_actuated_joint_values=body_values, + left_hand_actuated_joint_values=left_hand_values, + right_hand_actuated_joint_values=right_hand_values, + ) + assert q_reduced.shape == (reduced_robot.num_dofs,) + + # Verify that the values were set correctly in the reduced configuration + # Check body actuated joints + body_indices = reduced_robot.get_body_actuated_joint_indices() + np.testing.assert_array_almost_equal(q_reduced[body_indices], body_values) + + # Check left hand actuated joints + left_hand_indices = reduced_robot.get_hand_actuated_joint_indices("left") + np.testing.assert_array_almost_equal(q_reduced[left_hand_indices], left_hand_values) + + # Check right hand actuated joints + right_hand_indices = reduced_robot.get_hand_actuated_joint_indices("right") + np.testing.assert_array_almost_equal(q_reduced[right_hand_indices], right_hand_values) + + +def test_reduced_robot_model_reset_forward_kinematics(g1_robot_model): + """ + Test resetting 
forward kinematics in reduced model. + """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing") + + fixed_joints = robot_model.joint_names[:2] + reduced_robot = ReducedRobotModel(robot_model, fixed_joints) + + # Create a more significant configuration change + q_reduced = np.zeros(reduced_robot.num_dofs) + root_nq = 7 if reduced_robot.full_robot.is_floating_base_model else 0 + # Set some extreme joint angles + q_reduced[root_nq:] = np.pi / 2 # 90 degrees for all joints + reduced_robot.cache_forward_kinematics(q_reduced) + + # Reset to default + reduced_robot.reset_forward_kinematics() + + # Check that frame placement matches what we get with q_zero + reduced_robot.cache_forward_kinematics(reduced_robot.q_zero) + placement_q_zero = reduced_robot.frame_placement( + reduced_robot.supplemental_info.hand_frame_names["left"] + ) + placement_reset = reduced_robot.frame_placement( + reduced_robot.supplemental_info.hand_frame_names["left"] + ) + np.testing.assert_array_almost_equal( + placement_reset.translation, placement_q_zero.translation + ) + np.testing.assert_array_almost_equal(placement_reset.rotation, placement_q_zero.rotation) + + +def test_reduced_robot_model_from_fixed_groups(g1_robot_model): + """ + Test creating reduced model from fixed joint groups. 
+ """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing joint groups") + + # Get a group name from the supplemental info + group_name = next(iter(robot_model.supplemental_info.joint_groups.keys())) + group_info = robot_model.supplemental_info.joint_groups[group_name] + + # Get all joints that should be fixed (including those from subgroups) + expected_fixed_joints = set() + # Add direct joints + expected_fixed_joints.update(group_info["joints"]) + # Add joints from subgroups + for subgroup_name in group_info["groups"]: + subgroup_joints = robot_model.get_joint_group_indices(subgroup_name) + expected_fixed_joints.update([robot_model.joint_names[idx] for idx in subgroup_joints]) + + # Test from_fixed_groups + reduced_robot = ReducedRobotModel.from_fixed_groups(robot_model, [group_name]) + assert reduced_robot.full_robot is robot_model + + # Verify that fixed joints are not in reduced model's joint names + for joint in expected_fixed_joints: + assert joint not in reduced_robot.joint_names + + # Verify that fixed joints maintain their values in configuration + q_reduced = np.ones(reduced_robot.num_dofs) # Set some non-zero values + q_full = reduced_robot.reduced_to_full_configuration(q_reduced) + + # Get the fixed values from the reduced model + fixed_values = dict(zip(reduced_robot.fixed_joints, reduced_robot.fixed_values)) + + # Check that all expected fixed joints have their values preserved + for joint in expected_fixed_joints: + full_idx = robot_model.dof_index(joint) + assert q_full[full_idx] == fixed_values[joint] + + # Test from_fixed_group (convenience method) + reduced_robot = ReducedRobotModel.from_fixed_group(robot_model, group_name) + assert reduced_robot.full_robot is robot_model + + # Verify that fixed joints are not in reduced model's joint names + for joint in expected_fixed_joints: + assert joint not in reduced_robot.joint_names 
+ + # Verify that fixed joints maintain their values in configuration + q_reduced = np.ones(reduced_robot.num_dofs) # Set some non-zero values + q_full = reduced_robot.reduced_to_full_configuration(q_reduced) + + # Get the fixed values from the reduced model + fixed_values = dict(zip(reduced_robot.fixed_joints, reduced_robot.fixed_values)) + + # Check that all expected fixed joints have their values preserved + for joint in expected_fixed_joints: + full_idx = robot_model.dof_index(joint) + assert q_full[full_idx] == fixed_values[joint] + + +def test_reduced_robot_model_from_active_groups(g1_robot_model): + """ + Test creating reduced model from active joint groups. + """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing joint groups") + + # Get a group name from the supplemental info + group_name = next(iter(robot_model.supplemental_info.joint_groups.keys())) + group_info = robot_model.supplemental_info.joint_groups[group_name] + + # Get all joints that should be active (including those from subgroups) + expected_active_joints = set() + # Add direct joints + expected_active_joints.update(group_info["joints"]) + # Add joints from subgroups + for subgroup_name in group_info["groups"]: + subgroup_joints = robot_model.get_joint_group_indices(subgroup_name) + expected_active_joints.update([robot_model.joint_names[idx] for idx in subgroup_joints]) + + # Get all joints from the model + all_joints = set(robot_model.joint_names) + # The fixed joints should be all joints minus the active joints + expected_fixed_joints = all_joints - expected_active_joints + + # Test from_active_groups + reduced_robot = ReducedRobotModel.from_active_groups(robot_model, [group_name]) + assert reduced_robot.full_robot is robot_model + + # Verify that active joints are in reduced model's joint names + for joint in expected_active_joints: + assert joint in 
reduced_robot.joint_names + + # Verify that fixed joints are not in reduced model's joint names + for joint in expected_fixed_joints: + assert joint not in reduced_robot.joint_names + + # Verify that fixed joints maintain their values in configuration + q_reduced = np.ones(reduced_robot.num_dofs) # Set some non-zero values + q_full = reduced_robot.reduced_to_full_configuration(q_reduced) + + # Get the fixed values from the reduced model + fixed_values = dict(zip(reduced_robot.fixed_joints, reduced_robot.fixed_values)) + + # Check that all expected fixed joints have their values preserved + for joint in expected_fixed_joints: + full_idx = robot_model.dof_index(joint) + assert q_full[full_idx] == fixed_values[joint] + + # Test from_active_group (convenience method) + reduced_robot = ReducedRobotModel.from_active_group(robot_model, group_name) + assert reduced_robot.full_robot is robot_model + + # Verify that active joints are in reduced model's joint names + for joint in expected_active_joints: + assert joint in reduced_robot.joint_names + + # Verify that fixed joints are not in reduced model's joint names + for joint in expected_fixed_joints: + assert joint not in reduced_robot.joint_names + + # Verify that fixed joints maintain their values in configuration + q_reduced = np.ones(reduced_robot.num_dofs) # Set some non-zero values + q_full = reduced_robot.reduced_to_full_configuration(q_reduced) + + # Get the fixed values from the reduced model + fixed_values = dict(zip(reduced_robot.fixed_joints, reduced_robot.fixed_values)) + + # Check that all expected fixed joints have their values preserved + for joint in expected_fixed_joints: + full_idx = robot_model.dof_index(joint) + assert q_full[full_idx] == fixed_values[joint] + + +def test_reduced_robot_model_frame_placement(g1_robot_model): + """ + Test the frame_placement method in reduced model with a valid and invalid frame name. + Also test that frame placements change with different configurations. 
+ """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing") + + # Create a reduced model by fixing some joints + fixed_joints = robot_model.joint_names[:2] + reduced_robot = ReducedRobotModel(robot_model, fixed_joints) + + # Use the hand frame from supplemental info + test_frame = reduced_robot.supplemental_info.hand_frame_names["left"] + + # Test with zero configuration + q_reduced_zero = np.zeros(reduced_robot.num_dofs) + reduced_robot.cache_forward_kinematics(q_reduced_zero) + placement_zero = reduced_robot.frame_placement(test_frame) + assert isinstance(placement_zero, pin.SE3) + + # Test with non-zero configuration + q_reduced_non_zero = np.zeros(reduced_robot.num_dofs) + root_nq = 7 if reduced_robot.full_robot.is_floating_base_model else 0 + + # Set a valid non-zero value for each joint + for i in range(root_nq, reduced_robot.num_dofs): + # Use a value that's within the joint limits + q_reduced_non_zero[i] = 0.5 # 0.5 radians is within most joint limits + + reduced_robot.cache_forward_kinematics(q_reduced_non_zero) + placement_non_zero = reduced_robot.frame_placement(test_frame) + + # Verify that frame placements are different with different configurations + assert not np.allclose( + placement_zero.translation, placement_non_zero.translation + ) or not np.allclose(placement_zero.rotation, placement_non_zero.rotation) + + # Should raise an error for an invalid frame + with pytest.raises(ValueError, match="Unknown frame"): + reduced_robot.frame_placement("non_existent_frame") + + +def test_robot_model_gravity_compensation_basic(g1_robot_model): + """ + Test basic gravity compensation functionality. 
+ """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing gravity compensation") + + # Create a valid configuration + q = np.zeros(robot_model.num_dofs) + if robot_model.is_floating_base_model: + # Set floating base to upright position + q[:7] = [0, 0, 1.0, 0, 0, 0, 1] # [x, y, z, qx, qy, qz, qw] + + # Test gravity compensation for all joints + gravity_torques = robot_model.compute_gravity_compensation_torques(q) + + # Check output shape + assert gravity_torques.shape == (robot_model.num_dofs,) + + # For a humanoid robot with arms, there should be some non-zero gravity torques + assert np.any(np.abs(gravity_torques) > 1e-6), "Expected some non-zero gravity torques" + + +def test_robot_model_gravity_compensation_joint_groups(g1_robot_model): + """ + Test gravity compensation with different joint group specifications. + """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing gravity compensation") + + # Create a valid configuration + q = np.zeros(robot_model.num_dofs) + if robot_model.is_floating_base_model: + q[:7] = [0, 0, 1.0, 0, 0, 0, 1] + + # Get available joint groups + available_groups = list(robot_model.supplemental_info.joint_groups.keys()) + if not available_groups: + pytest.skip("No joint groups available for testing") + + test_group = available_groups[0] # Use first available group + + # Test with string input + gravity_str = robot_model.compute_gravity_compensation_torques(q, test_group) + assert gravity_str.shape == (robot_model.num_dofs,) + + # Test with list input + gravity_list = robot_model.compute_gravity_compensation_torques(q, [test_group]) + np.testing.assert_array_equal(gravity_str, gravity_list) + + # Test with set input + gravity_set = robot_model.compute_gravity_compensation_torques(q, {test_group}) + 
np.testing.assert_array_equal(gravity_str, gravity_set) + + # Test that compensation is selective (some joints should be zero) + group_indices = robot_model.get_joint_group_indices(test_group) + if len(group_indices) < robot_model.num_dofs: + # Check that only specified joints have compensation + non_zero_mask = np.abs(gravity_str) > 1e-6 + compensated_indices = np.where(non_zero_mask)[0] + # The compensated indices should be a subset of the group indices + assert len(compensated_indices) <= len(group_indices) + + +def test_robot_model_gravity_compensation_multiple_groups(g1_robot_model): + """ + Test gravity compensation with multiple joint groups. + """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing gravity compensation") + + # Create a valid configuration + q = np.zeros(robot_model.num_dofs) + if robot_model.is_floating_base_model: + q[:7] = [0, 0, 1.0, 0, 0, 0, 1] + + # Get available joint groups + available_groups = list(robot_model.supplemental_info.joint_groups.keys()) + if len(available_groups) < 2: + pytest.skip("Need at least 2 joint groups for testing") + + # Test with multiple groups + test_groups = available_groups[:2] + gravity_multiple = robot_model.compute_gravity_compensation_torques(q, test_groups) + assert gravity_multiple.shape == (robot_model.num_dofs,) + + # Test individual groups + gravity_1 = robot_model.compute_gravity_compensation_torques(q, test_groups[0]) + gravity_2 = robot_model.compute_gravity_compensation_torques(q, test_groups[1]) + + # The multiple group result should have at least as many non-zero elements + # as either individual group (could be more due to overlaps) + nonzero_multiple = np.count_nonzero(np.abs(gravity_multiple) > 1e-6) + nonzero_1 = np.count_nonzero(np.abs(gravity_1) > 1e-6) + nonzero_2 = np.count_nonzero(np.abs(gravity_2) > 1e-6) + assert nonzero_multiple >= max(nonzero_1, nonzero_2) + 
+ +def test_robot_model_gravity_compensation_configuration_dependency(g1_robot_model): + """ + Test that gravity compensation changes with robot configuration. + """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing gravity compensation") + + # Get available joint groups - prefer arms if available + available_groups = list(robot_model.supplemental_info.joint_groups.keys()) + test_group = None + for group in ["arms", "left_arm", "right_arm"]: + if group in available_groups: + test_group = group + break + if test_group is None and available_groups: + test_group = available_groups[0] + if test_group is None: + pytest.skip("No joint groups available for testing") + + # Test with different configurations + q1 = np.zeros(robot_model.num_dofs) + q2 = np.zeros(robot_model.num_dofs) + + if robot_model.is_floating_base_model: + # Both configurations upright but different joint positions + q1[:7] = [0, 0, 1.0, 0, 0, 0, 1] + q2[:7] = [0, 0, 1.0, 0, 0, 0, 1] + + # Change arm joint positions specifically (not random joints) + # This ensures we actually change joints that affect the gravity compensation + try: + arm_indices = robot_model.get_joint_group_indices(test_group) + if len(arm_indices) >= 2: + # Change first two arm joints significantly + q2[arm_indices[0]] = np.pi / 4 # 45 degrees + q2[arm_indices[1]] = np.pi / 6 # 30 degrees + elif len(arm_indices) >= 1: + # Change first arm joint if only one available + q2[arm_indices[0]] = np.pi / 3 # 60 degrees + except Exception: + # Fallback to changing some joints if arm indices not available + if robot_model.is_floating_base_model and robot_model.num_dofs > 9: + q2[7] = np.pi / 4 + q2[8] = np.pi / 6 + elif not robot_model.is_floating_base_model and robot_model.num_dofs > 2: + q2[0] = np.pi / 4 + q2[1] = np.pi / 6 + + # Compute gravity compensation for both configurations + gravity_1 = 
robot_model.compute_gravity_compensation_torques(q1, test_group) + gravity_2 = robot_model.compute_gravity_compensation_torques(q2, test_group) + + # They should be different (unless all compensated joints didn't change) + # Allow for small numerical differences + assert not np.allclose( + gravity_1, gravity_2, atol=1e-10 + ), "Gravity compensation should change with configuration" + + +def test_robot_model_gravity_compensation_error_handling(g1_robot_model): + """ + Test error handling in gravity compensation. + """ + for robot_model in [g1_robot_model]: + # Test with wrong configuration size + q_wrong = np.zeros(robot_model.num_dofs + 1) + with pytest.raises(ValueError, match="Expected q of length"): + robot_model.compute_gravity_compensation_torques(q_wrong) + + # Test with invalid joint group + q_valid = np.zeros(robot_model.num_dofs) + if robot_model.is_floating_base_model: + q_valid[:7] = [0, 0, 1.0, 0, 0, 0, 1] + + with pytest.raises(RuntimeError, match="Error computing gravity compensation"): + robot_model.compute_gravity_compensation_torques(q_valid, "non_existent_group") + + # Test with mixed valid/invalid groups + if robot_model.supplemental_info is not None: + available_groups = list(robot_model.supplemental_info.joint_groups.keys()) + if available_groups: + valid_group = available_groups[0] + with pytest.raises(RuntimeError, match="Error computing gravity compensation"): + robot_model.compute_gravity_compensation_torques( + q_valid, [valid_group, "non_existent_group"] + ) + + +def test_robot_model_gravity_compensation_auto_clip(g1_robot_model): + """ + Test auto-clipping functionality in gravity compensation. 
+ """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing gravity compensation") + + # Create configuration with values outside joint limits + q = np.zeros(robot_model.num_dofs) + root_nq = 7 if robot_model.is_floating_base_model else 0 + + if robot_model.is_floating_base_model: + q[:7] = [0, 0, 1.0, 0, 0, 0, 1] # Valid floating base + + # Set extreme joint values (outside limits) + if robot_model.num_dofs > root_nq: + q[root_nq:] = 100.0 # Very large values + + # Should work with auto_clip=True (default) + try: + gravity_clipped = robot_model.compute_gravity_compensation_torques(q, auto_clip=True) + assert gravity_clipped.shape == (robot_model.num_dofs,) + except Exception as e: + pytest.skip(f"Auto-clip test skipped due to: {e}") + + # Test with auto_clip=False - might work or might not depending on limits + try: + gravity_no_clip = robot_model.compute_gravity_compensation_torques(q, auto_clip=False) + assert gravity_no_clip.shape == (robot_model.num_dofs,) + except Exception: + # This is expected if the configuration is invalid + pass + + +def test_robot_model_gravity_compensation_arms_specific(g1_robot_model): + """ + Test gravity compensation specifically for arm joints (if available). 
+ """ + for robot_model in [g1_robot_model]: + # Skip if no supplemental info + if robot_model.supplemental_info is None: + pytest.skip("No supplemental info available for testing gravity compensation") + + available_groups = list(robot_model.supplemental_info.joint_groups.keys()) + + # Test arms specifically if available + if "arms" in available_groups: + q = np.zeros(robot_model.num_dofs) + if robot_model.is_floating_base_model: + q[:7] = [0, 0, 1.0, 0, 0, 0, 1] + + # Test arms gravity compensation + gravity_arms = robot_model.compute_gravity_compensation_torques(q, "arms") + assert gravity_arms.shape == (robot_model.num_dofs,) + + # Test left and right arms separately if available + if "left_arm" in available_groups and "right_arm" in available_groups: + gravity_left = robot_model.compute_gravity_compensation_torques(q, "left_arm") + gravity_right = robot_model.compute_gravity_compensation_torques(q, "right_arm") + + # Both arms should have non-zero compensation (for typical configurations) + if np.any(np.abs(gravity_arms) > 1e-6): + # If arms have compensation, at least one of left/right should too + assert np.any(np.abs(gravity_left) > 1e-6) or np.any( + np.abs(gravity_right) > 1e-6 + ) + else: + pytest.skip("No arm joint groups available for testing") diff --git a/tests/control/teleop/__init__.py b/decoupled_wbc/tests/control/teleop/__init__.py similarity index 100% rename from tests/control/teleop/__init__.py rename to decoupled_wbc/tests/control/teleop/__init__.py diff --git a/decoupled_wbc/tests/control/teleop/test_teleop_retargeting_ik.py b/decoupled_wbc/tests/control/teleop/test_teleop_retargeting_ik.py new file mode 100644 index 0000000..ef3a832 --- /dev/null +++ b/decoupled_wbc/tests/control/teleop/test_teleop_retargeting_ik.py @@ -0,0 +1,196 @@ +import time + +import numpy as np +import pytest + +from decoupled_wbc.control.robot_model.instantiation.g1 import instantiate_g1_robot_model +from decoupled_wbc.control.robot_model.robot_model import 
RobotModel +from decoupled_wbc.control.teleop.solver.hand.instantiation.g1_hand_ik_instantiation import ( + instantiate_g1_hand_ik_solver, +) +from decoupled_wbc.control.teleop.teleop_retargeting_ik import TeleopRetargetingIK + + +@pytest.fixture(params=["lower_body", "lower_and_upper_body"]) +def retargeting_ik(request): + waist_location = request.param + robot_model = instantiate_g1_robot_model(waist_location=waist_location) + left_hand_ik_solver, right_hand_ik_solver = instantiate_g1_hand_ik_solver() + return TeleopRetargetingIK( + robot_model=robot_model, + left_hand_ik_solver=left_hand_ik_solver, + right_hand_ik_solver=right_hand_ik_solver, + enable_visualization=False, # Change to true to visualize movements + body_active_joint_groups=["upper_body"], + ) + + +def generate_target_wrist_poses(mode: str, side: str, full_robot: RobotModel) -> dict: + """ + Args: + mode: One of "rotation" or "translation" + side: One of "left" or "right" - specifies which side to animate + Returns: + Dictionary mapping link names to target poses for both wrists + """ + + assert mode in ["rotation", "translation", "both"] + assert side in ["left", "right", "both"] + + # Set up initial state + full_robot.cache_forward_kinematics(full_robot.q_zero) + + # Get both wrist link names + left_wrist_link = full_robot.supplemental_info.hand_frame_names["left"] + right_wrist_link = full_robot.supplemental_info.hand_frame_names["right"] + + # Initialize default poses for both sides + left_default_pose = full_robot.frame_placement(left_wrist_link).np + right_default_pose = full_robot.frame_placement(right_wrist_link).np + + left_initial_pose_matrix = full_robot.frame_placement(left_wrist_link).np + right_initial_pose_matrix = full_robot.frame_placement(right_wrist_link).np + + # Constants + translation_cycle_duration = 4.0 + rotation_cycle_duration = 4.0 + total_duration = 12.0 + translation_amplitude = 0.1 + rotation_amplitude = np.deg2rad(60) # 30 degrees + + body_data_list = [] + for t in 
np.linspace(0, total_duration, 100): + rotation_matrix = np.eye(3) + current_left_translation_vector = left_initial_pose_matrix[:3, 3].copy() + current_right_translation_vector = right_initial_pose_matrix[:3, 3].copy() + + if mode == "rotation" or mode == "both": + # For rotation-only mode, start rotating immediately + rotation_axis_index = int(t // rotation_cycle_duration) % 3 + time_within_cycle = t % rotation_cycle_duration + angle = rotation_amplitude * np.sin( + (2 * np.pi / rotation_cycle_duration) * time_within_cycle + ) + + if rotation_axis_index == 0: # Roll + rotation_matrix = np.array( + [ + [1, 0, 0], + [0, np.cos(angle), -np.sin(angle)], + [0, np.sin(angle), np.cos(angle)], + ] + ) + elif rotation_axis_index == 2: # Pitch + rotation_matrix = np.array( + [ + [np.cos(angle), 0, np.sin(angle)], + [0, 1, 0], + [-np.sin(angle), 0, np.cos(angle)], + ] + ) + else: # Yaw + rotation_matrix = np.array( + [ + [np.cos(angle), -np.sin(angle), 0], + [np.sin(angle), np.cos(angle), 0], + [0, 0, 1], + ] + ) + + if mode == "translation" or mode == "both": + translation_axis_index = int(t // translation_cycle_duration) % 3 + time_within_cycle = t % translation_cycle_duration + offset = translation_amplitude * np.sin( + (2 * np.pi / translation_cycle_duration) * time_within_cycle + ) + current_left_translation_vector[translation_axis_index] += offset + current_right_translation_vector[translation_axis_index] += offset + + # Construct the 4x4 pose matrix for the animated side + left_animated_pose = np.eye(4) + left_animated_pose[:3, :3] = rotation_matrix + left_animated_pose[:3, 3] = current_left_translation_vector + + right_animated_pose = np.eye(4) + right_animated_pose[:3, :3] = rotation_matrix + right_animated_pose[:3, 3] = current_right_translation_vector + + # Create body_data dictionary with both wrists + body_data = {} + if side == "left": + body_data[left_wrist_link] = left_animated_pose + body_data[right_wrist_link] = right_default_pose + elif side == "right": + 
body_data[left_wrist_link] = left_default_pose + body_data[right_wrist_link] = right_animated_pose + elif side == "both": + body_data[left_wrist_link] = left_animated_pose + body_data[right_wrist_link] = right_animated_pose + + body_data_list.append(body_data) + + return body_data_list + + +@pytest.mark.parametrize("mode", ["translation", "rotation"]) +@pytest.mark.parametrize("side", ["both", "left", "right"]) +def test_ik_matches_fk(retargeting_ik, mode, side): + full_robot = retargeting_ik.full_robot + + # Generate target wrist poses + body_data_list = generate_target_wrist_poses(mode, side, full_robot) + + max_pos_error = 0 + max_rot_error = 0 + + for body_data in body_data_list: + + time_start = time.time() + + # Run IK to get joint angles + q = retargeting_ik.compute_joint_positions( + body_data, + left_hand_data=None, # Hand IK not tested + right_hand_data=None, # Hand IK not tested + ) + + time_end = time.time() + ik_time = time_end - time_start + print(f"IK time: {ik_time} s") + # Test commented out because of inconsistency in CI/CD computation time + # assert ik_time < 0.05, f"IK time too high for 20Hz loop: {ik_time} s" + + # Run FK to compute where the wrists actually ended up + full_robot.cache_forward_kinematics(q, auto_clip=False) + left_wrist_link = full_robot.supplemental_info.hand_frame_names["left"] + right_wrist_link = full_robot.supplemental_info.hand_frame_names["right"] + T_fk_left = full_robot.frame_placement(left_wrist_link).np + T_fk_right = full_robot.frame_placement(right_wrist_link).np + T_target_left = body_data[left_wrist_link] + T_target_right = body_data[right_wrist_link] + + # Check that FK translation matches target translation + pos_fk_left = T_fk_left[:3, 3] + pos_target_left = T_target_left[:3, 3] + pos_fk_right = T_fk_right[:3, 3] + pos_target_right = T_target_right[:3, 3] + + max_pos_error = max(max_pos_error, np.linalg.norm(pos_fk_left - pos_target_left)) + max_pos_error = max(max_pos_error, np.linalg.norm(pos_fk_right - 
pos_target_right)) + + # Check that FK rotation matches target rotation + rot_fk_left = T_fk_left[:3, :3] + rot_target_left = T_target_left[:3, :3] + rot_diff_left = rot_fk_left @ rot_target_left.T + rot_error_left = np.arccos(np.clip((np.trace(rot_diff_left) - 1) / 2, -1, 1)) + rot_fk_right = T_fk_right[:3, :3] + rot_target_right = T_target_right[:3, :3] + rot_diff_right = rot_fk_right @ rot_target_right.T + rot_error_right = np.arccos(np.clip((np.trace(rot_diff_right) - 1) / 2, -1, 1)) + + max_rot_error = max(max_rot_error, rot_error_left) + max_rot_error = max(max_rot_error, rot_error_right) + + assert max_pos_error < 0.01 and max_rot_error < np.deg2rad( + 1 + ), f"Max position error: {max_pos_error}, Max rotation error: {np.rad2deg(max_rot_error)} deg" diff --git a/tests/control/visualization/__init__.py b/decoupled_wbc/tests/control/visualization/__init__.py similarity index 100% rename from tests/control/visualization/__init__.py rename to decoupled_wbc/tests/control/visualization/__init__.py diff --git a/decoupled_wbc/tests/control/visualization/test_meshcat_visualizer_env.py b/decoupled_wbc/tests/control/visualization/test_meshcat_visualizer_env.py new file mode 100644 index 0000000..fcebc67 --- /dev/null +++ b/decoupled_wbc/tests/control/visualization/test_meshcat_visualizer_env.py @@ -0,0 +1,88 @@ +import pathlib +import time + +import numpy as np +import pytest + +from decoupled_wbc.control.robot_model import RobotModel +from decoupled_wbc.control.robot_model.supplemental_info.g1.g1_supplemental_info import ( + G1SupplementalInfo, +) + + +@pytest.fixture +def env_fixture(): + """ + Pytest fixture that creates and yields the MeshcatVisualizerEnv. + After the test, it closes the environment to clean up. 
+ """ + from decoupled_wbc.control.visualization.meshcat_visualizer_env import MeshcatVisualizerEnv + + root_dir = pathlib.Path(__file__).parent.parent.parent.parent + urdf_path = str( + root_dir / "decoupled_wbc/control/robot_model/model_data/g1/g1_29dof_with_hand.urdf" + ) + asset_path = str(root_dir / "decoupled_wbc/control/robot_model/model_data/g1") + robot_config = { + "asset_path": asset_path, + "urdf_path": urdf_path, + } + robot_model = RobotModel( + robot_config["urdf_path"], + robot_config["asset_path"], + supplemental_info=G1SupplementalInfo(), + ) + env = MeshcatVisualizerEnv(robot_model) + time.sleep(0.5) + yield env + env.close() + + +def test_meshcat_env_init(env_fixture): + """ + Test that the environment initializes without errors + and that reset() returns the proper data structure. + """ + env = env_fixture + initial_obs = env.reset() + assert isinstance(initial_obs, dict), "reset() should return a dictionary." + assert "q" in initial_obs, "The returned dictionary should contain key 'q'." + assert ( + len(initial_obs["q"]) == env.robot_model.num_dofs + ), "Length of 'q' should match the robot's DOF." + + +def test_meshcat_env_observation(env_fixture): + """ + Test that the observe() method returns a valid observation + conforming to the environment's observation space. + """ + env = env_fixture + observation = env.observe() + assert isinstance(observation, dict), "observe() should return a dictionary." + assert "q" in observation, "The returned dictionary should contain key 'q'." + assert ( + len(observation["q"]) == env.robot_model.num_dofs + ), "Length of 'q' should match the robot's DOF." + + +def test_meshcat_env_action(env_fixture): + """ + Test that we can queue an action and visualize it without error. 
+ """ + env = env_fixture + # Build a dummy action within the action space + test_action = {"q": 0.2 * np.ones(env.robot_model.num_dofs)} + + # This should not raise an exception and should visualize the correct configuration + env.queue_action(test_action) + + +def test_meshcat_env_close(env_fixture): + """ + Test closing the environment. (Though the fixture calls env.close() + automatically, we can invoke it here to ensure it's safe to do so.) + """ + env = env_fixture + env.close() + # If close() triggers no exceptions, we're good. diff --git a/decoupled_wbc/tests/data/test_exporter.py b/decoupled_wbc/tests/data/test_exporter.py new file mode 100644 index 0000000..1d5739e --- /dev/null +++ b/decoupled_wbc/tests/data/test_exporter.py @@ -0,0 +1,522 @@ +import json +from pathlib import Path +import shutil +import tempfile +import time + +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +import numpy as np +import pytest + +from decoupled_wbc.data.exporter import DataCollectionInfo, Gr00tDataExporter + + +@pytest.fixture +def test_features(): + """Fixture providing test features dict.""" + return { + "observation.images.ego_view": { + "dtype": "video", + "shape": [64, 64, 3], # Small images for faster tests + "names": ["height", "width", "channel"], + }, + "observation.state": { + "dtype": "float32", + "shape": (8,), + "names": ["x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8"], + }, + "action": { + "dtype": "float32", + "shape": (8,), + "names": ["a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8"], + }, + } + + +@pytest.fixture +def test_modality_config(): + return { + "state": {"feature1": {"start": 0, "end": 4}, "feature2": {"start": 4, "end": 9}}, + "action": {"feature1": {"start": 0, "end": 4}, "feature2": {"start": 4, "end": 9}}, + "video": {"rs_view": {"original_key": "observation.images.ego_view"}}, + "annotation": {"human.task_description": {"original_key": "task_index"}}, + } + + +@pytest.fixture +def test_data_collection_info(): + return 
DataCollectionInfo( + teleoperator_username="test_user", + support_operator_username="test_user", + robot_type="test_robot", + lower_body_policy="test_policy", + wbc_model_path="test_path", + ) + + +def get_test_frame(step: int): + """Generate a test frame with data that varies by step.""" + # Create a simple, small image that will encode quickly + img = np.ones((64, 64, 3), dtype=np.uint8) * (step % 255) + # Add a pattern to make each frame unique and verifiable + img[step % 64, :, :] = 255 - (step % 255) + + return { + "observation.images.ego_view": img, + "observation.state": np.ones(8, dtype=np.float32) * step, + "action": np.ones(8, dtype=np.float32) * step, + } + + +@pytest.fixture +def temp_dir(): + """Create a temporary directory for test data that's cleaned up after tests.""" + temp_dir = tempfile.mkdtemp() + yield Path(temp_dir) / "dataset" + shutil.rmtree(temp_dir) + + +class TestInterruptAndResume: + """Test class for simulating interruption and resumption of recording.""" + + # Skip this test if ffmpeg is not installed + @pytest.mark.skipif( + shutil.which("ffmpeg") is None, reason="ffmpeg not installed, skipping test" + ) + def test_interrupted_mid_episode( + self, temp_dir, test_features, test_modality_config, test_data_collection_info + ): + """ + Test that simulates a recording session that gets interrupted and then resumes. + + This test uses the actual Gr00tDataExporter implementation with no mocks. 
+ """ + # Constants for the test + NUM_EPISODES = 2 + FRAMES_PER_EPISODE = 5 + + # Pick a random episode and frame to interrupt at + interrupt_episode = 1 + interrupt_frame = 3 + + print(f"Will interrupt at episode {interrupt_episode}, frame {interrupt_frame}") + + # Track what we've added to verify later + completed_episodes = [] + frames_added_first_session = 0 + + # Initial recording session + try: + # Start recording with real Gr00tDataExporter + exporter1 = Gr00tDataExporter.create( + save_root=temp_dir, + fps=30, + features=test_features, + modality_config=test_modality_config, + task="test_task", + robot_type="test_robot", + vcodec="libx264", # Use a common codec that should be available + data_collection_info=test_data_collection_info, + ) + + # Record episodes until interruption + for episode in range(NUM_EPISODES): + for frame in range(FRAMES_PER_EPISODE): + # Simulate interruption + if episode == interrupt_episode and frame == interrupt_frame: + print(f"Simulating interruption at episode {episode}, frame {frame}") + raise KeyboardInterrupt("Simulated interruption") + + # Add frame + exporter1.add_frame(get_test_frame(frame)) + frames_added_first_session += 1 + + # Save episode + exporter1.save_episode() + completed_episodes.append(episode) + + except KeyboardInterrupt: + print(f"Recording interrupted at episode {interrupt_episode}, frame {interrupt_frame}") + print(f"Completed episodes: {completed_episodes}") + # Don't consolidate since we're interrupted + pass + + # Verify what was recorded before interruption + assert len(completed_episodes) == interrupt_episode + assert ( + frames_added_first_session == interrupt_episode * FRAMES_PER_EPISODE + interrupt_frame + ) + + # Let file system operations complete + time.sleep(0.5) + + # Resume recording - create a new exporter pointing to the same directory + exporter2 = Gr00tDataExporter.create( + save_root=temp_dir, + fps=30, + features=test_features, + modality_config=test_modality_config, + 
task="test_task", + robot_type="test_robot", + vcodec="libx264", + ) + + # The interrupted episode had frames added but wasn't saved + # In a real scenario with the current implementation, we need to restart that episode + + # Record all episodes from the beginning + frames_added_second_session = 0 + episodes_saved_second_session = 0 + + for episode in range(NUM_EPISODES): + for frame in range(FRAMES_PER_EPISODE): + exporter2.add_frame(get_test_frame(frame)) + frames_added_second_session += 1 + + # Save episode + exporter2.save_episode() + episodes_saved_second_session += 1 + + # Verify the result + assert frames_added_second_session == NUM_EPISODES * FRAMES_PER_EPISODE + assert episodes_saved_second_session == NUM_EPISODES + + # Verify actual files were created + for episode_idx in range(NUM_EPISODES): + video_path = exporter2.root / exporter2.meta.get_video_file_path( + episode_idx, "observation.images.ego_view" + ) + assert video_path.exists(), f"Video file not found: {video_path}" + + @pytest.mark.skipif( + shutil.which("ffmpeg") is None, reason="ffmpeg not installed, skipping test" + ) + def test_interrupted_after_episode_completion( + self, temp_dir, test_features, test_modality_config, test_data_collection_info + ): + """ + Test specifically for the case when interruption happens after an episode is completed. + Uses the real Gr00tDataExporter implementation. 
+ """ + # First session - record 1 complete episode and then interrupt + exporter1 = Gr00tDataExporter.create( + save_root=temp_dir, + fps=30, + features=test_features, + modality_config=test_modality_config, + task="test_task", + data_collection_info=test_data_collection_info, + vcodec="libx264", + ) + + # Record 1 complete episode + for frame in range(5): + exporter1.add_frame(get_test_frame(frame)) + exporter1.save_episode() + + # Let file system operations complete + time.sleep(0.5) + + # Verify the first episode was saved + video_path = exporter1.root / exporter1.meta.get_video_file_path( + 0, "observation.images.ego_view" + ) + assert video_path.exists(), f"First episode video file not found: {video_path}" + + # Second session - resume and record another episode + exporter2 = Gr00tDataExporter.create( + save_root=temp_dir, + fps=30, + features=test_features, + modality_config=test_modality_config, + task="test_task", + vcodec="libx264", + ) + + # Record the second episode + for frame in range(5): + exporter2.add_frame(get_test_frame(frame)) + exporter2.save_episode() + + # Verify the second episode was saved + video_path = exporter2.root / exporter2.meta.get_video_file_path( + 1, "observation.images.ego_view" + ) + assert video_path.exists(), f"Second episode video file not found: {video_path}" + + @pytest.mark.skipif( + shutil.which("ffmpeg") is None, reason="ffmpeg not installed, skipping test" + ) + def test_interrupted_no_episode_completion( + self, temp_dir, test_features, test_modality_config, test_data_collection_info + ): + """ + Test specifically for the case when interruption happens in the middle of recording an episode. + Uses the real Gr00tDataExporter implementation. 
+ """ + # First session - add some frames and interrupt before saving + exporter1 = Gr00tDataExporter.create( + save_root=temp_dir, + fps=30, + features=test_features, + modality_config=test_modality_config, + task="test_task", + data_collection_info=test_data_collection_info, + vcodec="libx264", + ) + + # Add 3 frames but don't save + for frame in range(3): + exporter1.add_frame(get_test_frame(frame)) + # Don't save episode or consolidate to simulate interruption + # The episode buffer is only in memory and will be lost on interruption + + # Let file system operations complete + time.sleep(0.5) + + # Verify no episode was saved + video_path = exporter1.root / exporter1.meta.get_video_file_path( + 0, "observation.images.ego_view" + ) + assert not video_path.exists(), f"Episode should not have been saved: {video_path}" + + # Second session - will raise an error because no meta file exist, so we can't resume + with pytest.raises(ValueError): + _ = Gr00tDataExporter.create( + save_root=temp_dir, + fps=30, + features=test_features, + modality_config=test_modality_config, + task="test_task", + vcodec="libx264", + ) + + +@pytest.mark.skipif(shutil.which("ffmpeg") is None, reason="ffmpeg not installed, skipping test") +def test_full_workflow(temp_dir, test_features, test_modality_config, test_data_collection_info): + """ + Test that simulates the complete workflow from the record_session.py example. 
+ """ + NUM_EPISODES = 2 + FRAMES_PER_EPISODE = 3 + + # Create the exporter + exporter = Gr00tDataExporter.create( + save_root=temp_dir, + fps=20, + features=test_features, + modality_config=test_modality_config, + task="test_task", + data_collection_info=test_data_collection_info, + robot_type="dummy", + ) + + # Create a small dataset + for episode_index in range(NUM_EPISODES): + for frame_index in range(FRAMES_PER_EPISODE): + exporter.add_frame(get_test_frame(frame_index)) + exporter.save_episode() + + # check modality config + modality_config_path = exporter.root / "meta" / "modality.json" + assert modality_config_path.exists(), f"{modality_config_path} does not exists." + with open(modality_config_path, "rb") as f: + actual_modality_config = json.load(f) + + assert ( + actual_modality_config == test_modality_config + ), f"Modality configs don't match.\nActual: {actual_modality_config}\nExpected: {test_modality_config}" + + # Verify results + for episode_idx in range(NUM_EPISODES): + video_path = exporter.root / exporter.meta.get_video_file_path( + episode_idx, "observation.images.ego_view" + ) + assert video_path.exists(), f"Video file not found: {video_path}" + + # Check that the expected number of episodes exists + episode_count = 0 + for path in exporter.root.glob("**/*.mp4"): + episode_count += 1 + assert episode_count == NUM_EPISODES, f"Expected {NUM_EPISODES} episodes, found {episode_count}" + + # Check the values of the dataset + dataset = LeRobotDataset( + repo_id="dataset", + root=temp_dir, + ) + for episode_idx in range(NUM_EPISODES): + for frame_idx in range(FRAMES_PER_EPISODE): + expected_frame = get_test_frame(frame_idx) + actual_frame = dataset[episode_idx * FRAMES_PER_EPISODE + frame_idx] + print(actual_frame["observation.images.ego_view"]) + actual_image_frame = actual_frame["observation.images.ego_view"].permute(1, 2, 0) * 255 + assert np.allclose( + actual_image_frame.numpy(), expected_frame["observation.images.ego_view"], atol=10 + ) # Allow 
some tolerance for video compression + assert np.allclose( + actual_frame["observation.state"], expected_frame["observation.state"] + ) + assert np.allclose(actual_frame["action"], expected_frame["action"]) + + # validate data_collection_info + assert dataset.meta.info["data_collection_info"] == test_data_collection_info.to_dict() + + +@pytest.mark.skipif(shutil.which("ffmpeg") is None, reason="ffmpeg not installed, skipping test") +def test_overwrite_existing_dataset_false( + temp_dir, test_features, test_modality_config, test_data_collection_info +): + """ + Test that appends to the existing dataset when overwrite_existing is set to false. + """ + # first dataset + FIRST_NUM_EPISODES = 2 + FIRST_FRAMES_PER_EPISODE = 3 + + exporter = Gr00tDataExporter.create( + save_root=temp_dir, + fps=20, + features=test_features, + modality_config=test_modality_config, + task="test_task", + data_collection_info=test_data_collection_info, + robot_type="dummy", + ) + # !! `overwrite_existing` should always be set to false by default + # So we're deliberately not setting the overwrite_existing argument here. + # This test ensures that + # i. the default behavior is overwrite_existing=False + # ii. 
the dataset appends to the existing dataset (instead of overwriting) + + # Create a first dataset + for episode_index in range(FIRST_NUM_EPISODES): + for frame_index in range(FIRST_FRAMES_PER_EPISODE): + exporter.add_frame(get_test_frame(frame_index)) + exporter.save_episode() + + # second dataset + del exporter + SECOND_NUM_EPISODES = 3 + SECOND_FRAMES_PER_EPISODE = 2 + + exporter = Gr00tDataExporter.create( + save_root=temp_dir, + fps=20, + features=test_features, + modality_config=test_modality_config, + task="test_task", + robot_type="dummy", + ) + for episode_index in range(SECOND_NUM_EPISODES): + for frame_index in range(SECOND_FRAMES_PER_EPISODE): + exporter.add_frame(get_test_frame(frame_index)) + exporter.save_episode() + + # verify that there are + EXPECTED_NUM_EPISODES = FIRST_NUM_EPISODES + SECOND_NUM_EPISODES + assert len(list(exporter.root.glob("**/*.mp4"))) == EXPECTED_NUM_EPISODES + assert len(list(exporter.root.glob("**/*.parquet"))) == EXPECTED_NUM_EPISODES + + +def test_overwrite_existing_dataset_true( + temp_dir, test_features, test_modality_config, test_data_collection_info +): + """ + Test that overwrites to an existing dataset when overwrite_existing=True. 
+ """ + # first dataset + FIRST_NUM_EPISODES = 2 + FIRST_FRAMES_PER_EPISODE = 3 + + exporter = Gr00tDataExporter.create( + save_root=temp_dir, + fps=20, + features=test_features, + modality_config=test_modality_config, + task="test_task", + data_collection_info=test_data_collection_info, + robot_type="dummy", + ) + + # Create a first dataset + for episode_index in range(FIRST_NUM_EPISODES): + for frame_index in range(FIRST_FRAMES_PER_EPISODE): + exporter.add_frame(get_test_frame(frame_index)) + exporter.save_episode() + + # verify that the dataset is written to the disk + assert len(list(exporter.root.glob("**/*.mp4"))) == FIRST_NUM_EPISODES + assert len(list(exporter.root.glob("**/*.parquet"))) == FIRST_NUM_EPISODES + + # second dataset + SECOND_NUM_EPISODES = 3 + SECOND_FRAMES_PER_EPISODE = 2 + + # re-initialize the exporter + del exporter + exporter = Gr00tDataExporter.create( + save_root=temp_dir, + fps=20, + features=test_features, + modality_config=test_modality_config, + task="test_task", + data_collection_info=test_data_collection_info, + robot_type="dummy", + overwrite_existing=True, + ) + for episode_index in range(SECOND_NUM_EPISODES): + for frame_index in range(SECOND_FRAMES_PER_EPISODE): + exporter.add_frame(get_test_frame(frame_index)) + exporter.save_episode() + + # verify that the dataset is overwritten + assert len(list(exporter.root.glob("**/*.mp4"))) == SECOND_NUM_EPISODES + assert len(list(exporter.root.glob("**/*.parquet"))) == SECOND_NUM_EPISODES + + +def test_save_episode_as_discarded_and_skip( + temp_dir, test_features, test_modality_config, test_data_collection_info +): + """ + Test that verifies the functionality of saving an episode as discarded and skipping an episode. 
+ """ + FIRST_NUM_EPISODES = 10 + FIRST_FRAMES_PER_EPISODE = 3 + + exporter = Gr00tDataExporter.create( + save_root=temp_dir, + fps=20, + features=test_features, + modality_config=test_modality_config, + task="test_task", + data_collection_info=test_data_collection_info, + robot_type="dummy", + ) + + # Create a first dataset + saved_episodes = 0 + discarded_episode_indices = [] + for episode_index in range(FIRST_NUM_EPISODES): + for frame_index in range(FIRST_FRAMES_PER_EPISODE): + exporter.add_frame(get_test_frame(frame_index)) + if episode_index % 3 == 0: + exporter.save_episode_as_discarded() + discarded_episode_indices.append(saved_episodes) + saved_episodes += 1 + elif episode_index % 3 == 1: + exporter.skip_and_start_new_episode() + else: + exporter.save_episode() + saved_episodes += 1 + + # verify that the dataset is written to the disk + assert len(list(exporter.root.glob("**/*.mp4"))) == saved_episodes + assert len(list(exporter.root.glob("**/*.parquet"))) == saved_episodes + + dataset = LeRobotDataset( + repo_id="dataset", + root=temp_dir, + ) + + assert dataset.meta.info["discarded_episode_indices"] == discarded_episode_indices diff --git a/tests/replay_data/all_joints_raw_data_replay.pkl b/decoupled_wbc/tests/replay_data/all_joints_raw_data_replay.pkl similarity index 100% rename from tests/replay_data/all_joints_raw_data_replay.pkl rename to decoupled_wbc/tests/replay_data/all_joints_raw_data_replay.pkl diff --git a/tests/replay_data/calibration_data.pkl b/decoupled_wbc/tests/replay_data/calibration_data.pkl similarity index 100% rename from tests/replay_data/calibration_data.pkl rename to decoupled_wbc/tests/replay_data/calibration_data.pkl diff --git a/tests/replay_data/g1_pnpbottle.parquet b/decoupled_wbc/tests/replay_data/g1_pnpbottle.parquet similarity index 100% rename from tests/replay_data/g1_pnpbottle.parquet rename to decoupled_wbc/tests/replay_data/g1_pnpbottle.parquet diff --git a/tests/replay_data/interpolation_data.pkl 
b/decoupled_wbc/tests/replay_data/interpolation_data.pkl similarity index 100% rename from tests/replay_data/interpolation_data.pkl rename to decoupled_wbc/tests/replay_data/interpolation_data.pkl diff --git a/tests/replay_data/sin_wave_arm_replay.pkl b/decoupled_wbc/tests/replay_data/sin_wave_arm_replay.pkl similarity index 100% rename from tests/replay_data/sin_wave_arm_replay.pkl rename to decoupled_wbc/tests/replay_data/sin_wave_arm_replay.pkl diff --git a/tests/replay_data/target_joints_data.pkl b/decoupled_wbc/tests/replay_data/target_joints_data.pkl similarity index 100% rename from tests/replay_data/target_joints_data.pkl rename to decoupled_wbc/tests/replay_data/target_joints_data.pkl diff --git a/decoupled_wbc/tests/sim/test_sim_data_collection.py b/decoupled_wbc/tests/sim/test_sim_data_collection.py new file mode 100644 index 0000000..a9d801d --- /dev/null +++ b/decoupled_wbc/tests/sim/test_sim_data_collection.py @@ -0,0 +1,64 @@ +from decoupled_wbc.control.main.teleop.configs.configs import SyncSimDataCollectionConfig +from decoupled_wbc.control.main.teleop.run_sync_sim_data_collection import ( + main as data_collection_main, +) + + +def test_sim_data_collection_unit(robot_name="G1", task_name="GroundOnly"): + """ + Fast CI unit test for simulation data collection (50 steps, no tracking checks). + + This test validates that: + 1. Data collection completes successfully + 2. Upper body joints are moving (velocity check) + + Note: This test runs for only 50 steps and does not perform end effector tracking validation + for faster CI execution. 
+ """ + config = SyncSimDataCollectionConfig() + config.robot = robot_name + config.task_name = task_name + config.enable_visualization = False + config.enable_real_device = False + config.enable_onscreen = False + config.save_img_obs = True + config.ci_test = True + config.ci_test_mode = "unit" + config.replay_data_path = "decoupled_wbc/tests/replay_data/all_joints_raw_data_replay.pkl" + config.remove_existing_dir = True + config.enable_gravity_compensation = True + res = data_collection_main(config) + assert res, "Data collection did not pass for all datasets" + + +def test_sim_data_collection_pre_merge(robot_name="G1", task_name="GroundOnly"): + """ + Pre-merge test for simulation data collection with end effector tracking validation (500 steps). + + This test validates that: + 1. Data collection completes successfully + 2. Upper body joints are moving (velocity check) + 3. End effector tracking error is within thresholds: + - G1 robots: + Max position error < 7cm (0.07m), Max rotation error < 17°, + Average position error < 5cm (0.05m), Average rotation error < 12° + """ + config = SyncSimDataCollectionConfig() + config.robot = robot_name + config.task_name = task_name + config.enable_visualization = False + config.enable_real_device = False + config.enable_onscreen = False + config.save_img_obs = True + config.ci_test = True + config.ci_test_mode = "pre_merge" + config.replay_data_path = "decoupled_wbc/tests/replay_data/all_joints_raw_data_replay.pkl" + config.remove_existing_dir = True + config.enable_gravity_compensation = True + res = data_collection_main(config) + assert res, "Data collection did not pass for all datasets" + + +if __name__ == "__main__": + # Run unit tests for fast CI + test_sim_data_collection_unit("G1", "GroundOnly") diff --git a/gr00t_wbc/version.py b/decoupled_wbc/version.py similarity index 100% rename from gr00t_wbc/version.py rename to decoupled_wbc/version.py diff --git a/docker/.bashrc b/docker/.bashrc deleted file mode 100644 
index ecd18c6..0000000 --- a/docker/.bashrc +++ /dev/null @@ -1,160 +0,0 @@ -# ~/.bashrc: executed by bash(1) for non-login shells. -# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) -# for examples - -# If not running interactively, don't do anything -case $- in - *i*) ;; - *) return;; -esac - -# don't put duplicate lines or lines starting with space in the history. -# See bash(1) for more options -HISTCONTROL=ignoreboth - -# append to the history file, don't overwrite it -shopt -s histappend - -# for setting history length see HISTSIZE and HISTFILESIZE in bash(1) -HISTSIZE=1000 -HISTFILESIZE=2000 - -# check the window size after each command and, if necessary, -# update the values of LINES and COLUMNS. -shopt -s checkwinsize - -# If set, the pattern "**" used in a pathname expansion context will -# match all files and zero or more directories and subdirectories. -#shopt -s globstar - -# make less more friendly for non-text input files, see lesspipe(1) -[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" - -# set variable identifying the chroot you work in (used in the prompt below) -if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then - debian_chroot=$(cat /etc/debian_chroot) -fi - -# set a fancy prompt (non-color, unless we know we "want" color) -case "$TERM" in - xterm-color|*-256color) color_prompt=yes;; -esac - -# uncomment for a colored prompt, if the terminal has the capability; turned -# off by default to not distract the user: the focus in a terminal window -# should be on the output of commands, not on the prompt -force_color_prompt=yes - -if [ -n "$force_color_prompt" ]; then - if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then - # We have color support; assume it's compliant with Ecma-48 - # (ISO/IEC-6429). (Lack of such support is extremely rare, and such - # a case would tend to support setf rather than setaf.) 
- color_prompt=yes - else - color_prompt= - fi -fi - -if [ "$color_prompt" = yes ]; then - PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' -else - PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' -fi -unset color_prompt force_color_prompt - -# If this is an xterm set the title to user@host:dir -case "$TERM" in -xterm*|rxvt*) - PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" - ;; -*) - ;; -esac - -# enable color support of ls and also add handy aliases -if [ -x /usr/bin/dircolors ]; then - test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" - alias ls='ls --color=auto' - #alias dir='dir --color=auto' - #alias vdir='vdir --color=auto' - - alias grep='grep --color=auto' - alias fgrep='fgrep --color=auto' - alias egrep='egrep --color=auto' -fi - -# colored GCC warnings and errors -export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' - -# Set terminal type for color support -export TERM=xterm-256color - -# some more ls aliases -alias ll='ls -alF' -alias la='ls -A' -alias l='ls -CF' - -# Add an "alert" alias for long running commands. Use like so: -# sleep 10; alert -alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' - -# Alias definitions. -# You may want to put all your additions into a separate file like -# ~/.bash_aliases, instead of adding them here directly. -# See /usr/share/doc/bash-doc/examples in the bash-doc package. - -if [ -f ~/.bash_aliases ]; then - . ~/.bash_aliases -fi - -# enable programmable completion features (you don't need to enable -# this, if it's already enabled in /etc/bash.bashrc and /etc/profile -# sources /etc/bash.bashrc). -if ! shopt -oq posix; then - if [ -f /usr/share/bash-completion/bash_completion ]; then - . 
/usr/share/bash-completion/bash_completion - elif [ -f /etc/bash_completion ]; then - . /etc/bash_completion - fi -fi - -# useful commands -bind '"\e[A": history-search-backward' -bind '"\e[B": history-search-forward' - -# Store the last 10 directories in a history file -CD_HISTFILE=~/.cd_history -CD_HISTSIZE=10 - -cd() { - local histfile="${CD_HISTFILE:-$HOME/.cd_history}" - local max="${CD_HISTSIZE:-10}" - - case "$1" in - --) [ -f "$histfile" ] && tac "$histfile" | nl -w2 -s' ' || echo "No directory history yet."; return ;; - -[0-9]*) - local idx=${1#-} - local dir=$(tac "$histfile" 2>/dev/null | sed -n "${idx}p") - [ -n "$dir" ] && builtin cd "$dir" || echo "Invalid selection: $1" - return ;; - esac - - builtin cd "$@" || return - - [[ $(tail -n1 "$histfile" 2>/dev/null) != "$PWD" ]] && echo "$PWD" >> "$histfile" - tail -n "$max" "$histfile" > "${histfile}.tmp" && mv "${histfile}.tmp" "$histfile" -} - -# Manus to LD_LIBRARY_PATH -export LD_LIBRARY_PATH=$GR00T_WBC_DIR/gr00t_wbc/control/teleop/device/SDKClient_Linux/ManusSDK/lib:$LD_LIBRARY_PATH - -# CUDA support -export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:/usr/lib:/lib/x86_64-linux-gnu:/lib64:/lib:$LD_LIBRARY_PATH - -# gr00t_wbc aliases -alias dg="python $GR00T_WBC_DIR/scripts/deploy_g1.py" -alias rsl="python $GR00T_WBC_DIR/gr00t_wbc/control/main/teleop/run_sim_loop.py" -alias rgcl="python $GR00T_WBC_DIR/gr00t_wbc/control/main/teleop/run_g1_control_loop.py" -alias rtpl="python $GR00T_WBC_DIR/gr00t_wbc/control/main/teleop/run_teleop_policy_loop.py" -alias tgcl="pytest $GR00T_WBC_DIR/tests/control/main/teleop/test_g1_control_loop.py -s" diff --git a/docker/Dockerfile.deploy b/docker/Dockerfile.deploy deleted file mode 100644 index 7ba55e3..0000000 --- a/docker/Dockerfile.deploy +++ /dev/null @@ -1,127 +0,0 @@ -FROM nvgear/ros-2:latest - -# Accept build argument for username -ARG USERNAME -ARG USERID -ARG HOME_DIR -ARG WORKTREE_NAME - -# Create user with the same name as host -RUN if [ "$USERID" != "0" ]; 
then \ - useradd -m -u ${USERID} -s /bin/bash ${USERNAME} && \ - echo "${USERNAME} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \ - # Add user to video and render groups for GPU access - usermod -a -G video,render ${USERNAME} || true; \ - fi - -# Copy .bashrc with color settings before switching user -COPY --chown=${USERNAME}:${USERNAME} docker/.bashrc ${HOME_DIR}/.bashrc - -# Install Manus udev rules -COPY --chown=${USERNAME}:${USERNAME} docker/70-manus-hid.rules /etc/udev/rules.d/70-manus-hid.rules - -# Copy tmux configuration -COPY --chown=${USERNAME}:${USERNAME} docker/.tmux.conf ${HOME_DIR}/.tmux.conf - -# Switch to user -USER ${USERNAME} - -# Install tmux plugin manager and uv in parallel -RUN git clone https://github.com/tmux-plugins/tpm ${HOME_DIR}/.tmux/plugins/tpm & \ - curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR=${HOME_DIR}/.cargo/bin sh & \ - wait - -# Install tmux plugins automatically -RUN ${HOME_DIR}/.tmux/plugins/tpm/bin/install_plugins || true - -# Add uv to PATH -ENV PATH="${HOME_DIR}/.cargo/bin:$PATH" -ENV UV_PYTHON=${HOME_DIR}/venv/bin/python - -# Create venv -RUN uv venv --python 3.10 ${HOME_DIR}/venv - -# Install hardware-specific packages (x86 only - not available on ARM64/Orin) -USER root -COPY --chown=${USERNAME}:${USERNAME} gr00t_wbc/control/teleop/device/pico/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb ${HOME_DIR}/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb -COPY --chown=${USERNAME}:${USERNAME} gr00t_wbc/control/teleop/device/pico/roboticsservice_1.0.0.0_arm64.deb ${HOME_DIR}/roboticsservice_1.0.0.0_arm64.deb - -RUN if [ "$(dpkg --print-architecture)" = "amd64" ]; then \ - # Ultra Leap setup - wget -qO - https://repo.ultraleap.com/keys/apt/gpg | gpg --dearmor | tee /etc/apt/trusted.gpg.d/ultraleap.gpg && \ - echo 'deb [arch=amd64] https://repo.ultraleap.com/apt stable main' | tee /etc/apt/sources.list.d/ultraleap.list && \ - apt-get update && \ - echo "yes" | DEBIAN_FRONTEND=noninteractive apt-get 
install -y ultraleap-hand-tracking libhidapi-dev && \ - # Space Mouse udev rules - echo 'KERNEL=="hidraw*", SUBSYSTEM=="hidraw", MODE="0664", GROUP="plugdev"' > /etc/udev/rules.d/99-hidraw-permissions.rules && \ - usermod -aG plugdev ${USERNAME}; \ - # Pico setup - apt-get install -y xdg-utils && \ - dpkg -i ${HOME_DIR}/XRoboToolkit_PC_Service_1.0.0_ubuntu_22.04_amd64.deb; \ - else \ - echo "Skipping x86-only hardware packages on $(dpkg --print-architecture)"; \ - fi - -USER ${USERNAME} -# Install hardware Python packages (x86 only) with caching -RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ - if [ "$(dpkg --print-architecture)" = "amd64" ]; then \ - # Ultra Leap Python bindings - git clone https://github.com/ultraleap/leapc-python-bindings ${HOME_DIR}/leapc-python-bindings && \ - cd ${HOME_DIR}/leapc-python-bindings && \ - UV_CONCURRENT_DOWNLOADS=8 uv pip install -r requirements.txt && \ - MAKEFLAGS="-j$(nproc)" ${HOME_DIR}/venv/bin/python -m build leapc-cffi && \ - uv pip install leapc-cffi/dist/leapc_cffi-0.0.1.tar.gz && \ - uv pip install -e leapc-python-api && \ - # Space Mouse Python package - uv pip install pyspacemouse && \ - # Pico Python bindings - git clone https://github.com/XR-Robotics/XRoboToolkit-PC-Service-Pybind.git ${HOME_DIR}/XRoboToolkit-PC-Service-Pybind && \ - cd ${HOME_DIR}/XRoboToolkit-PC-Service-Pybind && \ - uv pip install setuptools pybind11 && \ - sed -i "s|pip install|uv pip install|g" setup_ubuntu.sh && \ - sed -i "s|pip uninstall|uv pip uninstall|g" setup_ubuntu.sh && \ - sed -i "s|python setup.py install|${HOME_DIR}/venv/bin/python setup.py install|g" setup_ubuntu.sh && \ - bash setup_ubuntu.sh; \ - fi - -# Install Python dependencies using uv with caching -RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ - UV_CONCURRENT_DOWNLOADS=8 uv pip install --upgrade pip ipython jupyter notebook debugpy - - -# Copy entire project to the workspace directory where it will be 
mounted at runtime -# NOTE: The build context must be the project root for this to work -# Use dynamic worktree name to match runtime mount path -COPY --chown=${USERNAME}:${USERNAME} . ${HOME_DIR}/Projects/${WORKTREE_NAME} - -# Install Python dependencies inside the venv with caching - split into separate commands -RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ - UV_CONCURRENT_DOWNLOADS=8 uv pip install \ - -e ${HOME_DIR}/Projects/${WORKTREE_NAME}/external_dependencies/unitree_sdk2_python - -# Unlike pip, uv downloads LFS files by default. There's a bug in uv that causes LFS files -# to fail to download (https://github.com/astral-sh/uv/issues/3312). So we need to set -# UV_GIT_LFS=1 to prevent uv from downloading LFS files. -# Install project dependencies (VLA extras only on x86) with caching -RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ - GIT_LFS_SKIP_SMUDGE=1 UV_CONCURRENT_DOWNLOADS=8 uv pip install -e "${HOME_DIR}/Projects/${WORKTREE_NAME}[dev]" - -# Clone and install robosuite with specific branch -RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ - git clone https://github.com/xieleo5/robosuite.git ${HOME_DIR}/robosuite && \ - cd ${HOME_DIR}/robosuite && \ - git checkout leo/support_g1_locomanip && \ - UV_CONCURRENT_DOWNLOADS=8 uv pip install -e . 
- -# Install gr00trobocasa -RUN --mount=type=cache,target=${HOME_DIR}/.cache/uv,uid=${USERID},gid=${USERID} \ - UV_CONCURRENT_DOWNLOADS=8 uv pip install -e ${HOME_DIR}/Projects/${WORKTREE_NAME}/gr00t_wbc/dexmg/gr00trobocasa - -# Configure bash environment with virtual environment and ROS2 setup -RUN echo "source ${HOME_DIR}/venv/bin/activate" >> ${HOME_DIR}/.bashrc && \ - echo "source /opt/ros/humble/setup.bash" >> ${HOME_DIR}/.bashrc && \ - echo "export ROS_LOCALHOST_ONLY=1" >> ${HOME_DIR}/.bashrc - -# Default command (can be overridden at runtime) -CMD ["/bin/bash"] diff --git a/docker/build_deploy_base.sh b/docker/build_deploy_base.sh deleted file mode 100644 index 6c1ddb6..0000000 --- a/docker/build_deploy_base.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash - -# Multi-architecture Docker build script -# Supports linux/amd64 - -set -e - -# Configuration -IMAGE_NAME="nvgear/ros-2" -TAG="${1:-latest}" -DOCKERFILE="docker/Dockerfile.deploy.base" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -echo -e "${GREEN}Building multi-architecture Docker image: ${IMAGE_NAME}:${TAG}${NC}" - -# Ensure we're using the multiarch builder -echo -e "${YELLOW}Setting up multiarch builder...${NC}" -sudo docker buildx use multiarch-builder 2>/dev/null || { - echo -e "${YELLOW}Creating multiarch builder...${NC}" - sudo docker buildx create --name multiarch-builder --use --bootstrap -} - -# Show supported platforms -echo -e "${YELLOW}Supported platforms:${NC}" -sudo docker buildx inspect --bootstrap | grep Platforms - -# Build for multiple architectures -echo -e "${GREEN}Starting multi-arch build...${NC}" -sudo docker buildx build \ - --platform linux/amd64 \ - --file "${DOCKERFILE}" \ - --tag "${IMAGE_NAME}:${TAG}" \ - --push \ - . 
- -# Alternative: Build and load locally (only works for single platform) -# docker buildx build \ -# --platform linux/amd64 \ -# --file "${DOCKERFILE}" \ -# --tag "${IMAGE_NAME}:${TAG}" \ -# --load \ -# . - -echo -e "${GREEN}Multi-arch build completed successfully!${NC}" -echo -e "${GREEN}Image: ${IMAGE_NAME}:${TAG}${NC}" -echo -e "${GREEN}Platforms: linux/amd64${NC}" - -# Verify the manifest -echo -e "${YELLOW}Verifying multi-arch manifest...${NC}" -sudo docker buildx imagetools inspect "${IMAGE_NAME}:${TAG}" \ No newline at end of file diff --git a/docker/entrypoint/bash.sh b/docker/entrypoint/bash.sh deleted file mode 100755 index 1142dd8..0000000 --- a/docker/entrypoint/bash.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -set -e # Exit on error - -echo "Dependencies installed successfully. Starting interactive bash shell..." -exec /bin/bash \ No newline at end of file diff --git a/docker/entrypoint/deploy.sh b/docker/entrypoint/deploy.sh deleted file mode 100755 index 216fbc2..0000000 --- a/docker/entrypoint/deploy.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -e # Exit on error - -# Run the deployment script -# Check for script existence before running -DEPLOY_SCRIPT="scripts/deploy_g1.py" -if [ -f "$DEPLOY_SCRIPT" ]; then - echo "Running deployment script at $DEPLOY_SCRIPT" - echo "Using python from $(which python)" - echo "Deploy args: $@" - exec python "$DEPLOY_SCRIPT" "$@" -else - echo "ERROR: Deployment script not found at $DEPLOY_SCRIPT" - echo "Current directory structure:" - find . -type f -name "*.py" | grep -i deploy - echo "Available script options:" - find . -type f -name "*.py" | sort - echo "Starting a bash shell for troubleshooting..." 
- exec /bin/bash -fi \ No newline at end of file diff --git a/docker/entrypoint/install_deps.sh b/docker/entrypoint/install_deps.sh deleted file mode 100755 index cb602fb..0000000 --- a/docker/entrypoint/install_deps.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -e - -# Source virtual environment and ROS2 -source ${HOME}/venv/bin/activate -source /opt/ros/humble/setup.bash -export ROS_LOCALHOST_ONLY=1 - -# Install external dependencies -echo "Current directory: $(pwd)" -echo "Installing dependencies..." - -# Install Unitree SDK and LeRobot -if [ -d "external_dependencies/unitree_sdk2_python" ]; then - cd external_dependencies/unitree_sdk2_python/ - uv pip install -e . --no-deps - cd ../.. -fi - -# Install project -if [ -f "pyproject.toml" ]; then - UV_GIT_LFS=1 uv pip install -e ".[dev]" -fi diff --git a/docker/kill_gr00t_wbc_processors.sh b/docker/kill_gr00t_wbc_processors.sh deleted file mode 100755 index 03d0819..0000000 --- a/docker/kill_gr00t_wbc_processors.sh +++ /dev/null @@ -1,192 +0,0 @@ -#!/bin/bash - -# kill_gr00t_wbc_processors.sh -# Kill gr00t_wbc processes in current container to prevent message passing conflicts - -# Note: Don't use 'set -e' as tmux/pgrep commands may return non-zero exit codes - -# Configuration -DRY_RUN=false -FORCE=false -QUIET=false -declare -A FOUND_PROCESSES - -# Default to verbose mode if no arguments -[[ $# -eq 0 ]] && { QUIET=false; DRY_RUN=false; } - -# Parse arguments -while [[ $# -gt 0 ]]; do - case $1 in - --dry-run) DRY_RUN=true ;; - --force) FORCE=true ;; - --verbose|-v) VERBOSE=true ;; - --help|-h) - echo "Usage: $0 [--dry-run] [--force] [--verbose] [--help]" - echo "Kill gr00t_wbc processes to prevent message passing conflicts" - exit 0 ;; - *) echo "Unknown option: $1"; exit 1 ;; - esac - shift -done - -# Colors (only if not quiet) -if [[ "$QUIET" != true ]]; then - RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; BLUE='\033[0;34m'; NC='\033[0m' -else - RED=''; GREEN=''; YELLOW=''; BLUE=''; NC='' -fi 
- -# Show processes by pattern (for preview) -show_processes_by_pattern() { - local pattern="$1" desc="$2" - local pids=$(pgrep -f "$pattern" 2>/dev/null || true) - - [[ -z "$pids" ]] && return 0 - - echo -e "${YELLOW}$desc processes:${NC}" - - for pid in $pids; do - local cmd=$(ps -p $pid -o cmd= 2>/dev/null || echo "Process not found") - echo " PID $pid: $cmd" - done -} - -# Kill processes by pattern (silent killing) -kill_by_pattern() { - local pattern="$1" desc="$2" signal="${3:-TERM}" - local pids=$(pgrep -f "$pattern" 2>/dev/null || true) - - [[ -z "$pids" ]] && return 0 - - for pid in $pids; do - # Kill if not dry run - [[ "$DRY_RUN" != true ]] && kill -$signal $pid 2>/dev/null - done -} - -# Show tmux sessions (for preview) -show_tmux() { - local pattern="$1" - local sessions=$(tmux list-sessions 2>/dev/null | grep "$pattern" | cut -d: -f1 || true) - - [[ -z "$sessions" ]] && return 0 - - echo -e "${YELLOW}Tmux sessions:${NC}" - - for session in $sessions; do - echo " Session: $session" - done -} - -# Kill tmux sessions (silent killing) -kill_tmux() { - local pattern="$1" - local sessions=$(tmux list-sessions 2>/dev/null | grep "$pattern" | cut -d: -f1 || true) - - [[ -z "$sessions" ]] && return 0 - - for session in $sessions; do - [[ "$DRY_RUN" != true ]] && tmux kill-session -t "$session" 2>/dev/null - done -} - -# Show processes by port (for preview) -show_processes_by_port() { - local port="$1" desc="$2" - local pids=$(lsof -ti:$port 2>/dev/null || true) - - [[ -z "$pids" ]] && return 0 - - echo -e "${YELLOW}$desc (port $port):${NC}" - - for pid in $pids; do - local cmd=$(ps -p $pid -o cmd= 2>/dev/null || echo "Process not found") - echo " PID $pid: $cmd" - done -} - -# Kill processes by port (silent killing) -kill_by_port() { - local port="$1" desc="$2" - local pids=$(lsof -ti:$port 2>/dev/null || true) - - [[ -z "$pids" ]] && return 0 - - for pid in $pids; do - [[ "$DRY_RUN" != true ]] && kill -TERM $pid 2>/dev/null - done -} - -# Check if any 
processes exist -has_processes() { - # Check for processes - local has_tmux=$(tmux list-sessions 2>/dev/null | grep "g1_deployment" || true) - local has_control=$(pgrep -f "run_g1_control_loop.py" 2>/dev/null || true) - local has_teleop=$(pgrep -f "run_teleop_policy_loop.py" 2>/dev/null || true) - local has_camera=$(pgrep -f "camera_forwarder.py" 2>/dev/null || true) - local has_rqt=$(pgrep -f "rqt.*image_view" 2>/dev/null || true) - local has_port=$(lsof -ti:5555 2>/dev/null || true) - - [[ -n "$has_tmux" || -n "$has_control" || -n "$has_teleop" || -n "$has_camera" || -n "$has_rqt" || -n "$has_port" ]] -} - -# Main execution -main() { - # Check if any processes exist first - if ! has_processes; then - # No processes to kill, exit silently - exit 0 - fi - - # Show header and processes to be killed - if [[ "$QUIET" != true ]]; then - echo -e "${BLUE}=== gr00t_wbc Process Killer ===${NC}" - [[ "$DRY_RUN" == true ]] && echo -e "${BLUE}=== DRY RUN MODE ===${NC}" - - # Show what will be killed - show_tmux "g1_deployment" - show_processes_by_pattern "run_g1_control_loop.py" "G1 control loop" - show_processes_by_pattern "run_teleop_policy_loop.py" "Teleop policy" - show_processes_by_pattern "camera_forwarder.py" "Camera forwarder" - show_processes_by_pattern "rqt.*image_view" "RQT viewer" - show_processes_by_port "5555" "Inference server" - - # Ask for confirmation - if [[ "$FORCE" != true && "$DRY_RUN" != true ]]; then - echo - echo -e "${RED}WARNING: This will terminate the above gr00t_wbc processes!${NC}" - read -p "Continue? 
[Y/n]: " -n 1 -r - echo - # Default to Y - only abort if user explicitly types 'n' or 'N' - [[ $REPLY =~ ^[Nn]$ ]] && { echo "Aborted."; exit 0; } - fi - echo - fi - - # Kill processes (silently) - kill_tmux "g1_deployment" - kill_by_pattern "run_g1_control_loop.py" "G1 control loop" - kill_by_pattern "run_teleop_policy_loop.py" "Teleop policy" - kill_by_pattern "camera_forwarder.py" "Camera forwarder" - kill_by_pattern "rqt.*image_view" "RQT viewer" - kill_by_port "5555" "Inference server" - - # Force kill remaining (SIGKILL) - [[ "$DRY_RUN" != true ]] && { - sleep 1 - kill_by_pattern "run_g1_control_loop.py" "G1 control loop" "KILL" - kill_by_pattern "run_teleop_policy_loop.py" "Teleop policy" "KILL" - kill_by_pattern "camera_forwarder.py" "Camera forwarder" "KILL" - } - - # Summary (unless quiet) - [[ "$QUIET" != true ]] && { - if [[ "$DRY_RUN" == true ]]; then - echo -e "${BLUE}=== DRY RUN COMPLETE ===${NC}" - else - echo -e "${GREEN}All gr00t_wbc processes terminated${NC}" - fi - } -} - -main "$@" diff --git a/docker/run_docker.sh b/docker/run_docker.sh deleted file mode 100755 index cca4621..0000000 --- a/docker/run_docker.sh +++ /dev/null @@ -1,454 +0,0 @@ -#!/bin/bash - -# Docker run script for gr00t_wbc with branch-based container isolation -# -# Usage: -# ./docker/run_docker.sh [OPTIONS] -# -# Options: -# --build Build Docker image -# --clean Clean containers -# --deploy Run in deploy mode -# --install Pull prebuilt Docker image -# --push Push built image to Docker Hub -# --branch Use branch-specific container names -# -# Branch-based Container Isolation (when --branch flag is used): -# - Each git branch gets its own isolated containers -# - Container names include branch identifier (e.g., gr00t_wbc-deploy-user-main) -# - Works with git worktrees, separate clones, or nested repositories -# - Clean and build operations only affect the current branch - -# Exit on error -set -e - -# Default values -BUILD=false -CLEAN=false -DEPLOY=false -INSTALL=false -# 
Flag to push the built Docker image to Docker Hub -# This should be used when someone updates the Docker image dependencies -# because this image is used for CI/CD pipelines -# When true, the image will be tagged and pushed to docker.io/nvgear/gr00t_wbc:latest (lowercased in practice) -DOCKER_HUB_PUSH=false -# Flag to build the docker with root user -# This could cause some of your local files to be owned by root -# If you get error like "PermissionError: [Errno 13] Permission denied:" -# You can run `sudo chown -R $USER:$USER .` in local machine to fix it -ROOT=false -BRANCH_MODE=false -EXTRA_ARGS=() -PROJECT_NAME="gr00t_wbc" -PROJECT_SLUG=$(echo "$PROJECT_NAME" | tr '[:upper:]' '[:lower:]') -REMOTE_IMAGE="nvgear/${PROJECT_SLUG}:latest" - -# Parse command line arguments -while [[ $# -gt 0 ]]; do - case $1 in - --build) - BUILD=true - shift - ;; - --clean) - CLEAN=true - shift - ;; - --deploy) - DEPLOY=true - shift - ;; - --install) - INSTALL=true - shift - ;; - --push) - DOCKER_HUB_PUSH=true - shift - ;; - --root) - ROOT=true - shift - ;; - --branch) - BRANCH_MODE=true - shift - ;; - *) - # Collect all unknown arguments as extra args for the deployment script - EXTRA_ARGS+=("$1") - shift - ;; - esac -done - -if [ "$INSTALL" = true ] && [ "$BUILD" = true ]; then - echo "Cannot use --install and --build together. Choose one." 
- exit 1 -fi - - -# Function to get branch name for container naming -function get_branch_id { - # Check if we're in a git repository - if git rev-parse --is-inside-work-tree > /dev/null 2>&1; then - # Get current branch name (returns "HEAD" in detached state) - local branch_name=$(git rev-parse --abbrev-ref HEAD) - # Replace forward slashes with dashes for valid container names - echo "${branch_name//\//-}" - else - # Default: no branch identifier (not in git repo) - echo "" - fi -} - -# Architecture detection helpers -is_arm64() { [ "$(dpkg --print-architecture)" = "arm64" ]; } -is_amd64() { [ "$(dpkg --print-architecture)" = "amd64" ]; } - -# Get current user's username and UID -if [ "$ROOT" = true ]; then - USERNAME=root - USERID=0 - DOCKER_HOME_DIR=/root - CACHE_FROM=${PROJECT_SLUG}-deploy-cache-root -else - USERNAME=$(whoami) - USERID=$(id -u) - DOCKER_HOME_DIR=/home/${USERNAME} - CACHE_FROM=${PROJECT_SLUG}-deploy-cache -fi -# Get input group ID for device access -INPUT_GID=$(getent group input | cut -d: -f3) - -# Get script directory for path calculations -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -# Function to get the actual project directory (worktree-aware) -function get_project_dir { - # For worktrees, use the actual worktree root path - if git rev-parse --is-inside-work-tree > /dev/null 2>&1; then - git rev-parse --show-toplevel - else - # Fallback to script-based detection - dirname "$SCRIPT_DIR" - fi -} - -# Get branch identifier -BRANCH_ID=$(get_branch_id) - -# Set project directory (needs to be after branch detection) -PROJECT_DIR="$(get_project_dir)" - -# Function to generate container name with optional branch support -function get_container_name { - local container_type="$1" - if [[ -n "$BRANCH_ID" ]] && [[ "$BRANCH_MODE" = true ]]; then - echo "${PROJECT_SLUG}-${container_type}-${USERNAME}-${BRANCH_ID}" - else - echo "${PROJECT_SLUG}-${container_type}-${USERNAME}" - fi -} - -# Set common variables used throughout the 
script -DEPLOY_CONTAINER=$(get_container_name "deploy") -BASH_CONTAINER=$(get_container_name "bash") -WORKTREE_NAME=$(basename "$PROJECT_DIR") - -# Debug output for branch detection -if [[ -n "$BRANCH_ID" ]] && [[ "$BRANCH_MODE" = true ]]; then - echo "Branch mode enabled - using branch: $BRANCH_ID" - echo "Project directory: $PROJECT_DIR" -elif [[ -n "$BRANCH_ID" ]]; then - echo "Branch mode disabled - using default containers" - echo "Project directory: $PROJECT_DIR" -else - echo "Running outside git repository" - echo "Project directory: $PROJECT_DIR" -fi - -# Get host's hostname and append -docker -HOSTNAME=$(hostname)-docker - -function clean_container { - echo "Cleaning up Docker containers..." - - # Stop containers - sudo docker stop $DEPLOY_CONTAINER 2>/dev/null || true - sudo docker stop $BASH_CONTAINER 2>/dev/null || true - # Remove containers - echo "Removing containers..." - sudo docker rm $DEPLOY_CONTAINER 2>/dev/null || true - sudo docker rm $BASH_CONTAINER 2>/dev/null || true - echo "Containers cleaned!" -} - - -# Function to install Docker Buildx if needed -function install_docker_buildx { - # Check if Docker Buildx is already installed - if sudo docker buildx version &> /dev/null; then - echo "Docker Buildx is already installed." - return 0 - fi - - echo "Installing Docker Buildx..." 
- - # Create directories and detect architecture - mkdir -p ~/.docker/cli-plugins/ && sudo mkdir -p /root/.docker/cli-plugins/ - ARCH=$(dpkg --print-architecture) - [[ "$ARCH" == "arm64" ]] && BUILDX_ARCH="linux-arm64" || BUILDX_ARCH="linux-amd64" - - # Get version (with fallback) - BUILDX_VERSION=$(curl -s https://api.github.com/repos/docker/buildx/releases/latest | grep tag_name | cut -d '"' -f 4) - BUILDX_VERSION=${BUILDX_VERSION:-v0.13.1} - - # Download and install for both user and root - curl -L "https://github.com/docker/buildx/releases/download/${BUILDX_VERSION}/buildx-${BUILDX_VERSION}.${BUILDX_ARCH}" -o ~/.docker/cli-plugins/docker-buildx - sudo cp ~/.docker/cli-plugins/docker-buildx /root/.docker/cli-plugins/docker-buildx - chmod +x ~/.docker/cli-plugins/docker-buildx && sudo chmod +x /root/.docker/cli-plugins/docker-buildx - - # Create builder - sudo docker buildx create --use --name mybuilder || true - sudo docker buildx inspect --bootstrap - - echo "Docker Buildx installation complete!" -} - -# Function to install NVIDIA Container Toolkit if needed -function install_nvidia_toolkit { - # Check if NVIDIA Container Toolkit is already installed - if command -v nvidia-container-toolkit &> /dev/null; then - echo "NVIDIA Container Toolkit is already installed." - return 0 - fi - - echo "Installing NVIDIA Container Toolkit..." - - # Add the package repositories - distribution=$(. /etc/os-release;echo $ID$VERSION_ID) - - # Check if GPG key exists and remove it if it does - if [ -f "/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg" ]; then - echo "Removing existing NVIDIA GPG key..." 
- sudo rm /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg - fi - - # Add new GPG key - curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg - - # Add repository - curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \ - sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ - sudo tee /etc/apt/sources.list.d/nvidia-container-runtime.list - - # Install nvidia-container-toolkit and docker if needed - sudo apt-get update - sudo apt-get install -y nvidia-container-toolkit - - # Install docker if not already installed - if ! command -v docker &> /dev/null; then - sudo apt-get install -y docker.io - fi - - # Configure Docker to use the NVIDIA runtime - sudo nvidia-ctk runtime configure --runtime=docker - - # Restart the Docker daemon - sudo systemctl restart docker - - echo "NVIDIA Container Toolkit installation complete!" -} - - -# Function to build Docker image for current branch -function build_docker_image { - echo "Building Docker image: $DEPLOY_CONTAINER" - - sudo docker buildx build \ - --build-arg USERNAME=$USERNAME \ - --build-arg USERID=$USERID \ - --build-arg HOME_DIR=$DOCKER_HOME_DIR \ - --build-arg WORKTREE_NAME=$WORKTREE_NAME \ - --cache-from $CACHE_FROM \ - -t $DEPLOY_CONTAINER \ - -f docker/Dockerfile.deploy \ - --load \ - . - - # Tag for persistent cache - # sudo docker tag $DEPLOY_CONTAINER $CACHE_FROM - echo "Docker image build complete!" -} - -# Build function -function build_with_cleanup { - echo "Building Docker image..." - echo "Removing existing containers and images..." - clean_container - # Tag for persistent cache before deleting the image - sudo docker tag $DEPLOY_CONTAINER $CACHE_FROM 2>/dev/null || true - sudo docker rmi $DEPLOY_CONTAINER 2>/dev/null || true - echo "Images cleaned!" 
- - install_docker_buildx - install_nvidia_toolkit - build_docker_image -} - -function install_remote_image { - echo "Installing Docker image from remote registry: $REMOTE_IMAGE" - echo "Removing existing containers to ensure a clean install..." - clean_container - sudo docker pull "$REMOTE_IMAGE" - sudo docker tag "$REMOTE_IMAGE" "$DEPLOY_CONTAINER" - sudo docker tag "$REMOTE_IMAGE" "$CACHE_FROM" 2>/dev/null || true - echo "Docker image install complete!" -} - -# Clean up if requested -if [ "$CLEAN" = true ]; then - clean_container - exit 0 -fi - -# Build if requested -if [ "$BUILD" = true ]; then - build_with_cleanup -fi - -if [ "$INSTALL" = true ]; then - install_remote_image -fi - -if [ "$DOCKER_HUB_PUSH" = true ]; then - echo "Pushing Docker image to Docker Hub: docker.io/nvgear/${PROJECT_SLUG}:latest" - sudo docker tag $DEPLOY_CONTAINER docker.io/nvgear/${PROJECT_SLUG}:latest - sudo docker push docker.io/nvgear/${PROJECT_SLUG}:latest - echo "Docker image pushed to Docker Hub!" - exit 0 -fi - -# Setup X11 display forwarding -setup_x11() { - # Set display if missing and X server available - if [ -z "$DISPLAY" ] && command -v xset >/dev/null 2>&1 && xset q >/dev/null 2>&1; then - export DISPLAY=:1 - echo "No DISPLAY set, using :1" - fi - - # Enable X11 forwarding if possible - if [ -n "$DISPLAY" ] && command -v xhost >/dev/null 2>&1 && xhost +local:docker 2>/dev/null; then - echo "X11 forwarding enabled" - return 0 - else - echo "Headless environment - X11 disabled" - export DISPLAY="" - return 1 - fi -} - -X11_ENABLED=false -setup_x11 && X11_ENABLED=true - -# Mount entire /dev directory for dynamic device access (including hidraw for joycon) -# This allows JoyCon controllers to be detected even when connected after container launch -sudo chmod g+r+w /dev/input/* - -# Detect GPU setup and set appropriate environment variables -echo "Detecting GPU setup..." 
-GPU_ENV_VARS="" - -# Check if we have both integrated and discrete GPUs (hybrid/Optimus setup) -HAS_AMD_GPU=$(lspci | grep -i "vga\|3d\|display" | grep -i amd | wc -l) -HAS_INTEL_GPU=$(lspci | grep -i "vga\|3d\|display" | grep -i intel | wc -l) -HAS_NVIDIA_GPU=$(lspci | grep -i "vga\|3d\|display" | grep -i nvidia | wc -l) - -if [[ "$HAS_INTEL_GPU" -gt 0 ]] || [[ "$HAS_AMD_GPU" -gt 0 ]] && [[ "$HAS_NVIDIA_GPU" -gt 0 ]]; then - echo "Detected hybrid GPU setup (Intel/AMD integrated + NVIDIA discrete)" - echo "Setting NVIDIA Optimus environment variables for proper rendering offload..." - GPU_ENV_VARS="-e __NV_PRIME_RENDER_OFFLOAD=1 \ - -e __VK_LAYER_NV_optimus=NVIDIA_only" -else - GPU_ENV_VARS="" -fi - -# Set GPU runtime based on architecture -if is_arm64; then - echo "Detected ARM64 architecture (Jetson Orin), using device access instead of nvidia runtime..." - GPU_RUNTIME_ARGS="--device /dev/nvidia0 --device /dev/nvidiactl --device /dev/nvidia-modeset --device /dev/nvidia-uvm --device /dev/nvidia-uvm-tools" -else - GPU_RUNTIME_ARGS="--gpus all --runtime=nvidia" -fi - -# Common Docker run parameters -DOCKER_RUN_ARGS="--hostname $HOSTNAME \ - --user $USERNAME \ - --group-add $INPUT_GID \ - $GPU_RUNTIME_ARGS \ - --ipc=host \ - --network=host \ - --privileged \ - --device=/dev \ - $GPU_ENV_VARS \ - -p 5678:5678 \ - -e DISPLAY=$DISPLAY \ - -e NVIDIA_VISIBLE_DEVICES=all \ - -e NVIDIA_DRIVER_CAPABILITIES=graphics,compute,utility \ - -e __GLX_VENDOR_LIBRARY_NAME=nvidia \ - -e USERNAME=$USERNAME \ - -e GR00T_WBC_DIR="$DOCKER_HOME_DIR/Projects/$WORKTREE_NAME" \ - -v /dev/bus/usb:/dev/bus/usb \ - -v /tmp/.X11-unix:/tmp/.X11-unix \ - -v $HOME/.ssh:$DOCKER_HOME_DIR/.ssh \ - -v $HOME/.gear:$DOCKER_HOME_DIR/.gear \ - -v $HOME/.Xauthority:$DOCKER_HOME_DIR/.Xauthority \ - -v $PROJECT_DIR:$DOCKER_HOME_DIR/Projects/$(basename "$PROJECT_DIR") - --device /dev/snd \ - --group-add audio \ - -e PULSE_SERVER=unix:/run/user/$(id -u)/pulse/native \ - -v /run/user/$(id 
-u)/pulse/native:/run/user/$(id -u)/pulse/native \ - -v $HOME/.config/pulse/cookie:/home/$USERNAME/.config/pulse/cookie" - -# Check if RL mode first, then handle container logic -if [ "$DEPLOY" = true ]; then - # Deploy mode - use gr00t_wbc-deploy-${USERNAME} container - - # Always clean up old processes and create a new container - # Kill all gr00t_wbc processes across containers to prevent message passing conflicts - "$SCRIPT_DIR/kill_gr00t_wbc_processors.sh" - echo "Creating new deploy container..." - - # Clean up old processes and create a fresh deploy container - # Remove existing deploy container if it exists - if sudo docker ps -a --format '{{.Names}}' | grep -q "^$DEPLOY_CONTAINER$"; then - echo "Removing existing deploy container..." - sudo docker rm -f $DEPLOY_CONTAINER - fi - sudo docker run -it --rm $DOCKER_RUN_ARGS \ - -w $DOCKER_HOME_DIR/Projects/$WORKTREE_NAME \ - --name $DEPLOY_CONTAINER \ - $DEPLOY_CONTAINER \ - /bin/bash -ic 'exec "$0" "$@"' \ - "${DOCKER_HOME_DIR}/Projects/${WORKTREE_NAME}/docker/entrypoint/deploy.sh" \ - "${EXTRA_ARGS[@]}" -else - # Bash mode - use gr00t_wbc-bash-${USERNAME} container - if sudo docker ps -a --format '{{.Names}}' | grep -q "^$BASH_CONTAINER$"; then - echo "Bash container exists, starting it..." - sudo docker start $BASH_CONTAINER > /dev/null - sudo docker exec -it $BASH_CONTAINER /bin/bash - else - echo "Creating new bash container with auto-install gr00t_wbc..." 
- sudo docker run -it $DOCKER_RUN_ARGS \ - -w $DOCKER_HOME_DIR/Projects/$WORKTREE_NAME \ - --name $BASH_CONTAINER \ - $DEPLOY_CONTAINER \ - /bin/bash -ic 'exec "$0"' \ - "${DOCKER_HOME_DIR}/Projects/${WORKTREE_NAME}/docker/entrypoint/bash.sh" - fi -fi - -# Cleanup X11 permissions -$X11_ENABLED && xhost -local:docker 2>/dev/null diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..d0c3cbf --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..125dc4e --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,10 @@ +# Sphinx documentation build dependencies +sphinx>=7.0,<9.0 +sphinx-book-theme>=1.1.0 +myst-parser>=3.0.0 +autodocsumm>=0.2.12 +sphinxemoji>=0.3.1 +sphinxcontrib-bibtex>=2.6.0 +sphinxcontrib-video>=0.2.1 +sphinx-copybutton>=0.5.2 +sphinx-design>=0.6.0 diff --git a/docs/source/.nojekyll b/docs/source/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/docs/source/_static/Keyboard_Guidance.mp4 b/docs/source/_static/Keyboard_Guidance.mp4 new file mode 100644 index 0000000..f0a1e1e Binary files /dev/null and b/docs/source/_static/Keyboard_Guidance.mp4 differ diff --git a/docs/source/_static/NVIDIA-logo-black.png b/docs/source/_static/NVIDIA-logo-black.png new file mode 100644 index 0000000..12e9545 Binary files /dev/null and b/docs/source/_static/NVIDIA-logo-black.png differ diff --git a/docs/source/_static/NVIDIA-logo-white.png b/docs/source/_static/NVIDIA-logo-white.png new file mode 100644 index 0000000..1143326 Binary files /dev/null and b/docs/source/_static/NVIDIA-logo-white.png differ diff --git a/docs/source/_static/Pipeline.jpg b/docs/source/_static/Pipeline.jpg new file mode 100644 index 0000000..51989f0 Binary files /dev/null and b/docs/source/_static/Pipeline.jpg differ diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css new file mode 100644 index 0000000..d2e8f2b --- /dev/null +++ b/docs/source/_static/css/custom.css @@ -0,0 +1,102 @@ +/* + * NVIDIA-styled theme for GR00T-WholeBodyControl + * Based on IsaacLab documentation styling + * Colors reference: https://clrs.cc/ + */ + +/* anything related to the light theme */ +html[data-theme="light"] { + --pst-color-primary: #76B900; + --pst-color-secondary: #5b8e03; + --pst-color-secondary-highlight: #5b8e03; + 
--pst-color-inline-code-links: #76B900; + --pst-color-info: var(--pst-color-primary); + --pst-color-info-highlight: var(--pst-color-primary); + --pst-color-info-bg: #daedb9; + --pst-color-attention: #ffc107; + --pst-color-text-base: #323232; + --pst-color-text-muted: #646464; + --pst-color-shadow: #d8d8d8; + --pst-color-border: #c9c9c9; + --pst-color-inline-code: #76B900; + --pst-color-target: #fbe54e; + --pst-color-background: #fff; + --pst-color-on-background: #fff; + --pst-color-surface: #f5f5f5; + --pst-color-on-surface: #e1e1e1; + --pst-color-link: var(--pst-color-primary); + --pst-color-link-hover: #789841; + --pst-color-table-row-hover-bg: #daedb9; + --pst-color-accent: var(--pst-color-primary); +} + +/* anything related to the dark theme */ +html[data-theme="dark"] { + --pst-color-primary: #76B900; + --pst-color-secondary: #c2f26f; + --pst-color-secondary-highlight: #c2f26f; + --pst-color-inline-code-links: #b6e664; + --pst-color-info: var(--pst-color-primary); + --pst-color-info-highlight: var(--pst-color-primary); + --pst-color-info-bg: #3a550b; + --pst-color-attention: #dca90f; + --pst-color-text-base: #cecece; + --pst-color-text-muted: #a6a6a6; + --pst-color-shadow: #212121; + --pst-color-border: silver; + --pst-color-inline-code: #76B900; + --pst-color-target: #472700; + --pst-color-background: #121212; + --pst-color-on-background: #1e1e1e; + --pst-color-surface: #212121; + --pst-color-on-surface: #373737; + --pst-color-link: var(--pst-color-primary); + --pst-color-link-hover: #aee354; + --pst-color-table-row-hover-bg: #3a550b; + --pst-color-accent: var(--pst-color-primary); +} + +a { + text-decoration: none !important; +} + +/* for the announcement link */ +.bd-header-announcement a, +.bd-header-version-warning a { + color: #7FDBFF; +} + +/* for the search box in the navbar */ +.form-control { + border-radius: 0 !important; + border: none !important; + outline: none !important; +} + +/* reduce padding for logo */ +.navbar-brand { + padding-top: 0.0rem 
!important; + padding-bottom: 0.0rem !important; +} + +.navbar-icon-links { + padding-top: 0.0rem !important; + padding-bottom: 0.0rem !important; +} + +/* Remove "Built with Sphinx" footer */ +.footer-item__copyright, +.footer-item__theme { + display: none !important; +} + +/* Hide Sphinx attribution in footer */ +div.footer-item:has(.theme-switcher) ~ div.footer-item { + display: none !important; +} + +/* Cleaner footer styling */ +footer.bd-footer { + border-top: 1px solid var(--pst-color-border); + padding-top: 1rem; +} diff --git a/docs/source/_static/favicon.ico b/docs/source/_static/favicon.ico new file mode 100644 index 0000000..7b39f8a Binary files /dev/null and b/docs/source/_static/favicon.ico differ diff --git a/docs/source/_static/flashing.png b/docs/source/_static/flashing.png new file mode 100644 index 0000000..34de21b Binary files /dev/null and b/docs/source/_static/flashing.png differ diff --git a/docs/source/_static/kinematic_planner/Navigation.mp4 b/docs/source/_static/kinematic_planner/Navigation.mp4 new file mode 100644 index 0000000..319d905 Binary files /dev/null and b/docs/source/_static/kinematic_planner/Navigation.mp4 differ diff --git a/docs/source/_static/kinematic_planner/hand_crawling.mp4 b/docs/source/_static/kinematic_planner/hand_crawling.mp4 new file mode 100644 index 0000000..132563e Binary files /dev/null and b/docs/source/_static/kinematic_planner/hand_crawling.mp4 differ diff --git a/docs/source/_static/kinematic_planner/planner_boxing.mp4 b/docs/source/_static/kinematic_planner/planner_boxing.mp4 new file mode 100644 index 0000000..bb1f845 Binary files /dev/null and b/docs/source/_static/kinematic_planner/planner_boxing.mp4 differ diff --git a/docs/source/_static/kinematic_planner/planner_elbow_crawling.mp4 b/docs/source/_static/kinematic_planner/planner_elbow_crawling.mp4 new file mode 100644 index 0000000..64de419 Binary files /dev/null and b/docs/source/_static/kinematic_planner/planner_elbow_crawling.mp4 differ diff --git 
a/docs/source/_static/kinematic_planner/planner_happy.mp4 b/docs/source/_static/kinematic_planner/planner_happy.mp4 new file mode 100644 index 0000000..7d7034f Binary files /dev/null and b/docs/source/_static/kinematic_planner/planner_happy.mp4 differ diff --git a/docs/source/_static/kinematic_planner/planner_injured.mp4 b/docs/source/_static/kinematic_planner/planner_injured.mp4 new file mode 100644 index 0000000..0bd549d Binary files /dev/null and b/docs/source/_static/kinematic_planner/planner_injured.mp4 differ diff --git a/docs/source/_static/kinematic_planner/planner_kneeling.mp4 b/docs/source/_static/kinematic_planner/planner_kneeling.mp4 new file mode 100644 index 0000000..9f88996 Binary files /dev/null and b/docs/source/_static/kinematic_planner/planner_kneeling.mp4 differ diff --git a/docs/source/_static/kinematic_planner/planner_run.mp4 b/docs/source/_static/kinematic_planner/planner_run.mp4 new file mode 100644 index 0000000..3999ce0 Binary files /dev/null and b/docs/source/_static/kinematic_planner/planner_run.mp4 differ diff --git a/docs/source/_static/kinematic_planner/planner_stealth.mp4 b/docs/source/_static/kinematic_planner/planner_stealth.mp4 new file mode 100644 index 0000000..e5142cb Binary files /dev/null and b/docs/source/_static/kinematic_planner/planner_stealth.mp4 differ diff --git a/docs/source/_static/pico_setup/google_search_screenshot.png b/docs/source/_static/pico_setup/google_search_screenshot.png new file mode 100644 index 0000000..e552d82 Binary files /dev/null and b/docs/source/_static/pico_setup/google_search_screenshot.png differ diff --git a/docs/source/_static/pico_setup/internet.png b/docs/source/_static/pico_setup/internet.png new file mode 100644 index 0000000..ff390d6 Binary files /dev/null and b/docs/source/_static/pico_setup/internet.png differ diff --git a/docs/source/_static/pico_setup/picoSetUp.mp4 b/docs/source/_static/pico_setup/picoSetUp.mp4 new file mode 100644 index 0000000..40e935c Binary files /dev/null and 
b/docs/source/_static/pico_setup/picoSetUp.mp4 differ diff --git a/docs/source/_static/pico_setup/pico_setup_screenshot.png b/docs/source/_static/pico_setup/pico_setup_screenshot.png new file mode 100644 index 0000000..ae9173d Binary files /dev/null and b/docs/source/_static/pico_setup/pico_setup_screenshot.png differ diff --git a/docs/source/_static/pico_setup/pico_vr_screenshot.png b/docs/source/_static/pico_setup/pico_vr_screenshot.png new file mode 100644 index 0000000..557418c Binary files /dev/null and b/docs/source/_static/pico_setup/pico_vr_screenshot.png differ diff --git a/docs/source/_static/pico_setup/setuprecording.mp4 b/docs/source/_static/pico_setup/setuprecording.mp4 new file mode 100644 index 0000000..2f77930 Binary files /dev/null and b/docs/source/_static/pico_setup/setuprecording.mp4 differ diff --git a/docs/source/_static/pico_setup/xrrobot_setup.png b/docs/source/_static/pico_setup/xrrobot_setup.png new file mode 100644 index 0000000..fb881ef Binary files /dev/null and b/docs/source/_static/pico_setup/xrrobot_setup.png differ diff --git a/docs/source/_static/screws.png b/docs/source/_static/screws.png new file mode 100644 index 0000000..bef098a Binary files /dev/null and b/docs/source/_static/screws.png differ diff --git a/docs/source/_static/sim2sim.mp4 b/docs/source/_static/sim2sim.mp4 new file mode 100644 index 0000000..1746a07 Binary files /dev/null and b/docs/source/_static/sim2sim.mp4 differ diff --git a/docs/source/_static/sonic-preview-gif-480P.gif b/docs/source/_static/sonic-preview-gif-480P.gif new file mode 100644 index 0000000..c631e4f Binary files /dev/null and b/docs/source/_static/sonic-preview-gif-480P.gif differ diff --git a/docs/source/_static/ssd.png b/docs/source/_static/ssd.png new file mode 100644 index 0000000..bf2b10c Binary files /dev/null and b/docs/source/_static/ssd.png differ diff --git a/docs/source/_static/teleop/teleop_advanced.mp4 b/docs/source/_static/teleop/teleop_advanced.mp4 new file mode 100644 index 
0000000..41a43e4 Binary files /dev/null and b/docs/source/_static/teleop/teleop_advanced.mp4 differ diff --git a/docs/source/_static/teleop/teleop_manipulation.mp4 b/docs/source/_static/teleop/teleop_manipulation.mp4 new file mode 100644 index 0000000..3d7cbd6 Binary files /dev/null and b/docs/source/_static/teleop/teleop_manipulation.mp4 differ diff --git a/docs/source/_static/teleop/teleop_natural_movement.mp4 b/docs/source/_static/teleop/teleop_natural_movement.mp4 new file mode 100644 index 0000000..71b3e4d Binary files /dev/null and b/docs/source/_static/teleop/teleop_natural_movement.mp4 differ diff --git a/docs/source/_static/teleop/teleop_pause.mp4 b/docs/source/_static/teleop/teleop_pause.mp4 new file mode 100644 index 0000000..bc017ef Binary files /dev/null and b/docs/source/_static/teleop/teleop_pause.mp4 differ diff --git a/docs/source/_static/teleop/teleop_session_overview.mp4 b/docs/source/_static/teleop/teleop_session_overview.mp4 new file mode 100644 index 0000000..0ab44f3 Binary files /dev/null and b/docs/source/_static/teleop/teleop_session_overview.mp4 differ diff --git a/docs/source/_static/teleop/teleop_walking.mp4 b/docs/source/_static/teleop/teleop_walking.mp4 new file mode 100644 index 0000000..79d6cba Binary files /dev/null and b/docs/source/_static/teleop/teleop_walking.mp4 differ diff --git a/docs/source/_static/teleop_calib/BAD_Calib_VR3PT_recover_web.mp4 b/docs/source/_static/teleop_calib/BAD_Calib_VR3PT_recover_web.mp4 new file mode 100644 index 0000000..05e2ad6 Binary files /dev/null and b/docs/source/_static/teleop_calib/BAD_Calib_VR3PT_recover_web.mp4 differ diff --git a/docs/source/_static/teleop_calib/BAD_Calib_VR3PT_web.mp4 b/docs/source/_static/teleop_calib/BAD_Calib_VR3PT_web.mp4 new file mode 100644 index 0000000..69f48de Binary files /dev/null and b/docs/source/_static/teleop_calib/BAD_Calib_VR3PT_web.mp4 differ diff --git a/docs/source/_static/teleop_calib/basic_flow_web.mp4 
b/docs/source/_static/teleop_calib/basic_flow_web.mp4 new file mode 100644 index 0000000..60f87da Binary files /dev/null and b/docs/source/_static/teleop_calib/basic_flow_web.mp4 differ diff --git a/docs/source/api/index.md b/docs/source/api/index.md new file mode 100644 index 0000000..991b7c3 --- /dev/null +++ b/docs/source/api/index.md @@ -0,0 +1,3 @@ +# API Reference + +Coming soon... diff --git a/docs/source/api/teleop.md b/docs/source/api/teleop.md new file mode 100644 index 0000000..32f9c30 --- /dev/null +++ b/docs/source/api/teleop.md @@ -0,0 +1,3 @@ +# Teleoperation API + +Coming soon... diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..f80caba --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,145 @@ +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +import os +import sys + +# -- Project information ----------------------------------------------------- + +project = 'GR00T-WholeBodyControl' +copyright = '2026, NVIDIA' +author = 'NVIDIA GEAR Team' +release = '1.0.0' +version = '1.0' + +# -- General configuration --------------------------------------------------- + +extensions = [ + 'autodocsumm', + 'myst_parser', + 'sphinx.ext.napoleon', + 'sphinxemoji.sphinxemoji', + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.githubpages', + 'sphinx.ext.intersphinx', + 'sphinx.ext.mathjax', + 'sphinx.ext.todo', + 'sphinx.ext.viewcode', + 'sphinxcontrib.bibtex', + 'sphinx_copybutton', + 'sphinx_design', + 'sphinxcontrib.video', +] + +# mathjax hacks +mathjax3_config = { + "tex": { + "inlineMath": [["\\(", "\\)"]], + "displayMath": [["\\[", "\\]"]], + }, +} + +# emoji style +sphinxemoji_style = "twemoji" + +# supported file extensions for source files +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# BibTeX configuration 
+bibtex_bibfiles = [] + +# generate autosummary even if no references +autosummary_generate = True +autosummary_generate_overwrite = False + +# generate links to the documentation of objects in external projects +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable/', None), + 'torch': ('https://pytorch.org/docs/stable/', None), +} + +templates_path = ['_templates'] +exclude_patterns = ['_build', '_templates', 'Thumbs.db', '.DS_Store'] + +# List of zero or more Sphinx-specific warning categories to be squelched +suppress_warnings = [ + "ref.python", +] + +# -- MyST Parser configuration ----------------------------------------------- + +myst_enable_extensions = [ + "colon_fence", + "deflist", + "tasklist", +] + +# -- Options for HTML output ------------------------------------------------- + +import sphinx_book_theme + +html_title = "GR00T-WholeBodyControl Documentation" +html_theme_path = [sphinx_book_theme.get_html_theme_path()] +html_theme = "sphinx_book_theme" +html_favicon = "_static/favicon.ico" +html_show_copyright = True +html_show_sphinx = False # This removes "Built with Sphinx" footer +html_last_updated_fmt = "" + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ["_static"] +html_css_files = ["css/custom.css"] + +html_theme_options = { + "path_to_docs": "docs/", + "collapse_navigation": True, + "repository_url": "https://github.com/NVlabs/GR00T-WholeBodyControl", + "use_repository_button": True, + "use_issues_button": True, + "use_edit_page_button": True, + "show_toc_level": 1, + "use_sidenotes": True, + "logo": { + "text": "GR00T-WholeBodyControl Documentation", + "image_light": "_static/NVIDIA-logo-white.png", + "image_dark": "_static/NVIDIA-logo-black.png", + }, + "icon_links": [ + { + "name": "GitHub", + "url": "https://github.com/NVlabs/GR00T-WholeBodyControl", + "icon": "fa-brands fa-square-github", + "type": "fontawesome", + }, + { + "name": "GEAR-SONIC Website", + "url": "https://nvlabs.github.io/GEAR-SONIC/", + "icon": "fa-solid fa-globe", + "type": "fontawesome", + }, + { + "name": "Paper", + "url": "https://arxiv.org/abs/2511.07820", + "icon": "fa-solid fa-file-pdf", + "type": "fontawesome", + }, + ], + "icon_links_label": "Quick Links", +} + +templates_path = [ + "_templates", +] + +# -- Internationalization ---------------------------------------------------- + +language = "en" diff --git a/docs/source/getting_started/download_models.md b/docs/source/getting_started/download_models.md new file mode 100644 index 0000000..f5efbf0 --- /dev/null +++ b/docs/source/getting_started/download_models.md @@ -0,0 +1,140 @@ +# Downloading Model Checkpoints + +Pre-trained GEAR-SONIC checkpoints (ONNX format) are hosted on Hugging Face: + +**[nvidia/GEAR-SONIC](https://huggingface.co/nvidia/GEAR-SONIC)** + +## Quick Download + +### Install the dependency + +```bash +pip install huggingface_hub +``` + +### Run the download script + +From the repo root, run: + +```bash +python download_from_hf.py +``` + +This downloads the **latest** policy encoder + decoder + kinematic planner into +`gear_sonic_deploy/`, preserving the same directory layout the deployment binary expects. 
+ +--- + +## Options + +| Flag | Description | +|------|-------------| +| `--no-planner` | Skip the kinematic planner download | +| `--output-dir PATH` | Override the destination directory | +| `--token TOKEN` | HF token (alternative to `huggingface-cli login`) | + +### Examples + +```bash +# Policy + planner (default) +python download_from_hf.py + +# Policy only +python download_from_hf.py --no-planner + +# Download into a custom directory +python download_from_hf.py --output-dir /data/gear-sonic +``` + +--- + +## Manual download via CLI + +If you prefer the Hugging Face CLI: + +```bash +pip install huggingface_hub[cli] + +# Policy only +huggingface-cli download nvidia/GEAR-SONIC \ + model_encoder.onnx \ + model_decoder.onnx \ + observation_config.yaml \ + --local-dir gear_sonic_deploy + +# Everything (policy + planner) +huggingface-cli download nvidia/GEAR-SONIC --local-dir gear_sonic_deploy +``` + +--- + +## Manual download via Python + +```python +from huggingface_hub import hf_hub_download + +REPO_ID = "nvidia/GEAR-SONIC" + +encoder = hf_hub_download(repo_id=REPO_ID, filename="model_encoder.onnx") +decoder = hf_hub_download(repo_id=REPO_ID, filename="model_decoder.onnx") +config = hf_hub_download(repo_id=REPO_ID, filename="observation_config.yaml") +planner = hf_hub_download(repo_id=REPO_ID, filename="planner_sonic.onnx") + +print("Policy encoder :", encoder) +print("Policy decoder :", decoder) +print("Obs config :", config) +print("Planner :", planner) +``` + +--- + +## Available files + +``` +nvidia/GEAR-SONIC/ +├── model_encoder.onnx # Policy encoder +├── model_decoder.onnx # Policy decoder +├── observation_config.yaml # Observation configuration +└── planner_sonic.onnx # Kinematic planner +``` + +The download script places them into the layout the deployment binary expects: + +``` +gear_sonic_deploy/ +├── policy/release/ +│ ├── model_encoder.onnx +│ ├── model_decoder.onnx +│ └── observation_config.yaml +└── planner/target_vel/V2/ + └── planner_sonic.onnx 
+``` + +--- + +## Authentication + +The repository is **public** — no token required for downloading. + +If you hit rate limits or need to access private forks: + +```bash +# Option 1: CLI login (recommended — token is saved once) +huggingface-cli login + +# Option 2: environment variable +export HF_TOKEN="hf_..." +python download_from_hf.py + +# Option 3: pass token directly +python download_from_hf.py --token hf_... +``` + +Get a free token at [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens). + +--- + +## Next steps + +After downloading, follow the [Quick Start](quickstart.md) guide to run the +deployment stack in MuJoCo simulation or on real hardware. diff --git a/docs/source/getting_started/installation_deploy.md b/docs/source/getting_started/installation_deploy.md new file mode 100644 index 0000000..02b308f --- /dev/null +++ b/docs/source/getting_started/installation_deploy.md @@ -0,0 +1,145 @@ +# Installation (Deployment) + +## Prerequisites + +**Required for all setups:** +- **Ubuntu 20.04/22.04/24.04** or other Debian-based Linux distributions +- **CUDA Toolkit** (for GPU acceleration) +- **TensorRT** (for inference optimization) — **Install this first!** +- **Jetpack 6** (for onboard deployment) +- Python 3.8+ +- Git with LFS support + +**Download TensorRT** from [NVIDIA Developer](https://developer.nvidia.com/tensorrt/download/10x): + +| Platform | TensorRT Version | +|---|---| +| x86_64 (Desktop) | **10.13** | +| Jetson / G1 onboard Orin | **10.7** (requires JetPack 6 — [flashing guide](../references/jetpack6.md)) | + +```{tip} +Download the **TAR** package (not the DEB one) so you can extract TensorRT to any location. The archive is ~10 GB; consider using `pv` to monitor progress: +``` + +```{warning} +The versions listed above are the tested versions. Different versions of TensorRT may cause inference issues. If you intend to use another version, test in simulation first. 
+``` + +```sh +sudo apt-get install -y pv +pv TensorRT-*.tar.gz | tar -xz -f - +``` + +Move the unzipped TensorRT to `~/TensorRT` (or similar) and add to your `~/.bashrc`: + +```sh +export TensorRT_ROOT=$HOME/TensorRT +``` + +## Clone the Repository + +```bash +git clone https://github.com/NVlabs/GR00T-WholeBodyControl.git +cd GR00T-WholeBodyControl +git lfs pull # make sure all large files are fetched +``` + +## Setup + +### Native Development (Recommended) + +**Advantages:** Direct system installation, faster builds, production-ready. + +```{warning} +For G1 onboard deployment, we require the onboard Orin to be upgraded to Jetpack 6 to support TensorRT. Please follow the [flashing guide](../references/jetpack6.md) for upgrading! +``` + +**Prerequisites:** +- Basic development tools (cmake, git, etc.) +- (Optional) ROS2 if you plan to use ROS2-based input/output + +**Setup steps:** + +1. **Install system dependencies:** + +```sh +cd gear_sonic_deploy +chmod +x scripts/install_deps.sh +./scripts/install_deps.sh +``` + +2. **Set up environment:** + +```sh +source scripts/setup_env.sh +``` + +The setup script will automatically: +- Configure TensorRT environment +- Set up all necessary paths + +For convenience, you can add the environment setup to your shell profile: + +```sh +echo "source $(pwd)/scripts/setup_env.sh" >> ~/.bashrc +``` + +3. **Build the project:** + +```sh +just build +``` + +### Docker (ROS2 Development Environment) + +We provide a unified Docker environment with ROS2 Humble, supporting x86_64 and Jetson platforms. + +**Prerequisites:** +- Docker installed and user added to docker group +- `TensorRT_ROOT` environment variable set on host +- For Jetson: JetPack 6.1+ (CUDA 12.6) + +**Quick Setup:** + +```sh +# 1. Add user to docker group (one-time setup) +sudo usermod -aG docker $USER +newgrp docker + +# 2. Set TensorRT path (add to ~/.bashrc for persistence) +export TensorRT_ROOT=/path/to/TensorRT + +# 3. 
Launch container +cd gear_sonic_deploy +./docker/run-ros2-dev.sh +``` + +**Options:** + +```sh +./docker/run-ros2-dev.sh # Standard build (fast) +./docker/run-ros2-dev.sh --rebuild # Force rebuild +./docker/run-ros2-dev.sh --with-opengl # Include OpenGL for visualization (RViz, Gazebo) +``` + +**Architecture Support:** +- **x86_64**: CUDA 12.4.1 (requires NVIDIA driver 550+) +- **Jetson**: CUDA 12.4.1 container on CUDA 12.6 host (forward compatible) + +**Inside the container:** + +```sh +source scripts/setup_env.sh # set up dependency +just build # Build +just --list # Show all commands +``` + +**Troubleshooting:** +- If you get "permission denied", ensure you're in the docker group +- TensorRT must be set on the **host** before starting container +- For Jetson: Run `source scripts/setup_env.sh` on host first (sets jetson_clocks) + + + + + diff --git a/docs/source/getting_started/installation_training.md b/docs/source/getting_started/installation_training.md new file mode 100644 index 0000000..988f649 --- /dev/null +++ b/docs/source/getting_started/installation_training.md @@ -0,0 +1,3 @@ +# Installation (Training) + +Coming soon! \ No newline at end of file diff --git a/docs/source/getting_started/quickstart.md b/docs/source/getting_started/quickstart.md new file mode 100644 index 0000000..1d85e93 --- /dev/null +++ b/docs/source/getting_started/quickstart.md @@ -0,0 +1,91 @@ +# Quick Start + +Get started with SONIC in minutes! + + First, ensure you've completed the [installation guide](installation_deploy). + +```{admonition} Safety Warning +:class: danger +Robots can be dangerous. Ensure a clear safety zone, keep a safety operator ready to trigger an emergency stop in front of the keyboard, and use this software at your own risk. The authors and contributors are not responsible for any damage, injury, or loss caused by use or misuse of this project. +``` + +## IsaacLab Eval + +Coming soon! 
+ + +## Sim2Sim in MuJoCo + + + +For testing in a MuJoCo simulator, run the simulation loop and deployment script in separate terminals. + +```{note} +The MuJoCo simulator (Terminal 1) runs on the **host** in a Python virtual environment — it is **not** inside the Docker container. The deployment binary (Terminal 2) can run either natively on the host or inside the Docker container. If you are using Docker, run Terminal 1 on the host and Terminal 2 inside the container. +``` + +### One-time setup: install the MuJoCo sim environment + +On the **host** (outside Docker), from the **repo root** (`GR00T-WholeBodyControl/`), run: + +```sh +bash install_scripts/install_mujoco_sim.sh +``` + +This creates a lightweight `.venv_sim` virtual environment with only the packages needed for the simulator (MuJoCo, Pinocchio, Unitree SDK2, etc.). + +### Running the sim2sim loop + +We highly recommend running through this process and getting familiar with the controls in simulation before deploying on real hardware. + +**Terminal 1 — MuJoCo simulator** (host, from repo root): + +```sh +source .venv_sim/bin/activate +python gear_sonic/scripts/run_sim_loop.py +``` + +**Terminal 2 — Deployment** (host or Docker, from `gear_sonic_deploy/`): + +```sh +bash deploy.sh sim +``` + +**Starting Control:** + +1. In Terminal 2 (deploy.sh), press **`]`** to start the policy. +2. Click on the MuJoCo viewer window, press **`9`** to drop the robot to the ground. +3. Go back to Terminal 2. Press **`T`** to play the current reference motion — the robot will execute it to completion. +4. Press **`N`** or **`P`** to switch to the next or previous motion sequence. +5. Press **`T`** again to play the new motion. +6. You can press **`T`** again to replay the same motion once it has finished. If you want to stop and go back to the first frame of the current motion, press **`R`** to restart it from the beginning. This can be used to stop the motion without terminating the policy. +7. 
When you are done or need an **emergency stop**, press **`O`** to stop control and exit. + +For more controls, see the tutorials for [Keyboard](../tutorials/keyboard.md), [Gamepad](../tutorials/gamepad.md), [ZMQ Streaming](../tutorials/zmq.md), and [Interface Manager](../tutorials/manager.md). + +## Real Robot + +To deploy on the real G1 robot, run: + +```sh +./deploy.sh real +``` + +## Online Visualization + +Start the visualizer and connect to a running `g1_deploy` executable: + +```sh +python visualize_motion.py --realtime_debug_url tcp://localhost:5557 +``` + +Notes: +- Default port: 5557 (change with `--zmq-out-port `) +- Default topic: `g1_debug` (change with `--zmq-out-topic ` on executable, `--realtime_debug_topic ` on visualizer) +- For physical robots, replace `localhost` with the robot's IP address + +For offline motion CSV visualization and logging details, see [Deployment Code & Program Flow](../references/deployment_code.md). + +For more advanced usage, see the tutorials for [Keyboard](../tutorials/keyboard.md), [Gamepad](../tutorials/gamepad.md), [ZMQ Streaming](../tutorials/zmq.md), [VR Whole-Body Teleop](../tutorials/vr_wholebody_teleop.md), and [Interface Manager](../tutorials/manager.md). diff --git a/docs/source/getting_started/vr_teleop_setup.md b/docs/source/getting_started/vr_teleop_setup.md new file mode 100644 index 0000000..f6fd8e3 --- /dev/null +++ b/docs/source/getting_started/vr_teleop_setup.md @@ -0,0 +1,147 @@ +# VR Teleop Setup (PICO) + +This page covers the one-time hardware and software setup for PICO VR whole-body teleoperation. After completing these steps, proceed to the [ZMQ Manager tutorial](../tutorials/vr_wholebody_teleop.md) to run teleop in sim or on real hardware. 
+ +--- + +## Required Hardware + +- [PICO 4 / PICO 4 Pro headset](https://www.picoxr.com/global/products/pico4) +- [2x PICO controllers](https://www.picoxr.com/global/products/pico4) +- [2x PICO motion trackers](https://www.picoxr.com/global/products/pico-motion-tracker) (strapped to ankles) +- A high-speed, low-latency Wi-Fi connection; teleoperation performance is heavily dependent on network quality. + +--- + +## Step 1: Install XRoboToolkit + +XRoboToolkit consists of a PC service (running on your workstation) and a PICO app (running on the headset) that streams body-tracking data. + +### PC Service + +1. Go to [https://github.com/XR-Robotics](https://github.com/XR-Robotics). +2. Follow the **"Install XRoboToolkit-PC-Service"** instructions to install the PC service on your workstation. +3. For onboard deployment, run `sudo dpkg -i gear_sonic_deploy/thirdparty/roboticsservice_1.0.0.0_arm64.deb` + +### PICO App + +1. Wear the PICO headset to begin the setup and installation process. +2. Complete the quick setup on PICO. +3. Make sure the PICO is connected to Wi-Fi. +4. Open the browser application in the PICO. +5. Type **"xrobotoolkit"** in the search bar and select the GitHub page [https://github.com/XR-Robotics](https://github.com/XR-Robotics). + +```{image} ../_static/pico_setup/google_search_screenshot.png +:width: 600px +:align: center +``` + +6. Make sure **Developer Mode** is enabled (Settings → Developer). +7. **[INSIDE PICO]** Scroll down in the GitHub page until you see the APK download option and click with the PICO trigger to download it. + +```{tip} +Download [XRoboToolkit-PICO-1.1.1.apk](https://github.com/XR-Robotics/XRoboToolkit-Unity-Client/releases/download/v1.1.1/XRoboToolkit-PICO-1.1.1.apk) on PICO using the browser. ([Other Versions](https://github.com/XR-Robotics/XRoboToolkit-Unity-Client/releases)) +``` + +8. 
**[INSIDE PICO]** Open the manage downloads option on the top right section of the browser page and click to open the `XRoboToolkit-PICO-1.1.1.apk` download. +9. **[INSIDE PICO]** Select **Install** — the application will appear in the **Unknown** section of your library. + +--- + +## Step 2: Motion Tracker Setup + +```{image} ../_static/pico_setup/pico_setup_screenshot.png +:width: 600px +:align: center +``` + +1. Strap one PICO motion tracker to your left ankle and one to your right ankle. **Scrunch** down any baggy clothing so the trackers are visible. Make sure the side with the light indicator faces up. +2. Go to PICO settings. In the menu on the left, scroll down to the last option: **"Developer"**. Make sure **"Safeguard"** is turned off. + - If the Developer option is not active, tap on "Software" until it appears. +3. Click the **Wi-Fi icon** in the PICO menu. A picture of the headset will appear. Above the headset, there will be a small circular logo for the motion trackers. If there is no logo, open the **"Motion Tracker"** app itself. + - Headset and 2 controllers will populate — select **Motion Tracker** (small circle). +4. Next to each tracker, there is an **"i"** icon. Click on this and **unpair all trackers**. +5. Once all trackers are cleared, click the **"Pair"** button in the top right corner. +6. Press and hold the button on the top of each motion tracker for **6 seconds**. Once in pairing mode, the lights will flash red and blue. + +### Motion Tracker Calibration + +1. Wear the PICO headset over your eyes. +2. Press the blue **"Calibrate"** button and follow the two calibration sequences: + - **Sequence 1:** Stand stiff with the handheld controllers down by your sides. + - **Sequence 2:** Look down at the foot motion trackers until the headset cameras recognize them. +3. Once calibrated, wear the PICO headset around your forehead (ensuring PICO faces forward to continue detecting motion trackers). 
+ +--- + +## Step 3: Install the PICO Teleop Environment + +From the **repo root**: + +```bash +bash install_scripts/install_pico.sh +``` + +This creates a `.venv_teleop` virtual environment (Python 3.10) that includes: +- `teleop` extra (ZMQ, Pinocchio, PyVista) +- `sim` extra (MuJoCo, tyro) +- XRoboToolkit SDK +- Unitree SDK2 Python bindings + +Activate it with: + +```bash +source .venv_teleop/bin/activate # prompt: (gear_sonic_teleop) +``` + +--- + +## Step 4: Connect the PICO to Your Workstation + +:::{important} +Before connecting the PICO, you must first launch the teleop streamer on your workstation so the PC Service has something to connect to: + +```bash +source .venv_teleop/bin/activate +python gear_sonic/scripts/pico_manager_thread_server.py --manager \ + --vis_vr3pt --vis_smpl +``` + +Leave this running in a terminal throughout the setup steps below. This is part of the [VR Whole-Body Teleop tutorial](../tutorials/vr_wholebody_teleop.md). + +If the PICO cannot reach your workstation, check your firewall — disable it or add a rule to allow the XRoboToolkit port. +::: + + +1. Open the Wi-Fi settings on both the laptop/PC and PICO and ensure they are on the **same Wi-Fi network**. Take note of the Wi-Fi IPv4 address. + - To find the PICO's Wi-Fi, select the control center on the bottom right of the menu. + +```{image} ../_static/pico_setup/internet.png +:width: 600px +:align: center +``` + +```{image} ../_static/pico_setup/pico_vr_screenshot.png +:width: 600px +:align: center +``` + +2. Open the **XRoboToolKit** application. Enter the IP address of the laptop by clicking **"Enter"** next to "PC Service:". You will know it is properly connected if **WORKING** appears next to "Status:". + - If your IP address is already inputted, select **"Reconnect"** where it says "Status:" in the Network section. + +3. Make sure the following boxes are ticked as shown in the picture below: + - **"Head"** and **"Controller"** under the "Tracking" section. 
+ - For Data/Control, make sure to select the **"Send"** button. + - For "Pico Motion Tracker" make sure to select **"Full body"**. + +```{image} ../_static/pico_setup/xrrobot_setup.png +:width: 600px +:align: center +``` + + +--- + +## Next Steps + +Your PICO hardware and software are now ready. Proceed to the [ZMQ Manager (`zmq_manager`) tutorial](../tutorials/vr_wholebody_teleop.md) to run whole-body teleoperation in simulation or on the real robot. diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..7c37e7c --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,129 @@ +GR00T-WholeBodyControl Documentation +==================================== + +.. image:: https://img.shields.io/badge/License-Apache%202.0%20%7C%20NVIDIA%20Open%20Model-blue.svg + :target: resources/license.html + :alt: License + +.. image:: https://img.shields.io/badge/IsaacLab-2.3.0-blue.svg + :target: https://github.com/isaac-sim/IsaacLab/releases/tag/v2.3.0 + :alt: IsaacLab + +Welcome to the official documentation for **GR00T Whole-Body Control (WBC)**! This is a unified platform for developing and deploying advanced humanoid controllers. + + +What is GR00T-WholeBodyControl? +-------------------------------- + +This codebase serves as the foundation for: + +- **Decoupled WBC** models used in NVIDIA Isaac-Gr00t, Gr00t N1.5 and N1.6 (see :doc:`detailed reference `) +- **GEAR-SONIC Series**: State-of-the-art controllers from the GEAR team + +GEAR-SONIC +---------- +.. image:: _static/sonic-preview-gif-480P.gif + :width: 100% + :align: center + + +.. raw:: html + +

+ Website + Paper + GitHub +

+ +**SONIC** is a humanoid behavior foundation model that gives robots a core set of motor skills learned from large-scale human motion data. Rather than building separate controllers for every motion, SONIC uses motion tracking as a scalable training task so a single unified policy can produce natural, whole-body movement and support a wide range of behaviors. + +🎯 Key Features: + +- 🚶 Natural whole-body locomotion (walking, crawling, dynamic movements) +- 🎮 Real-time VR teleoperation support +- 🤖 Foundation for higher-level planning and interaction +- 📦 Ready-to-deploy C++ inference stack + +Quick Start: Sim2Sim +-------------------- + +Quickly test the SONIC deployment stack in MuJoCo before deploying on real hardware. + +.. raw:: html + + + +.. tip:: + + **Get running in minutes!** Follow the :doc:`Installation ` and :doc:`Quickstart ` guides to see this in action on your machine. + +Documentation +------------- + +.. toctree:: + :maxdepth: 2 + :caption: Getting Started + + getting_started/installation_deploy + getting_started/download_models + getting_started/quickstart + getting_started/vr_teleop_setup +.. getting_started/installation_training + +.. toctree:: + :maxdepth: 2 + :caption: Tutorials + + tutorials/keyboard + tutorials/gamepad + tutorials/zmq + tutorials/manager + tutorials/vr_wholebody_teleop + +.. toctree:: + :maxdepth: 2 + :caption: Best Practices + + user_guide/teleoperation + +.. user_guide/configuration +.. user_guide/training +.. user_guide/troubleshooting + +.. toctree:: + :maxdepth: 2 + :caption: API Reference + +.. api/index +.. api/teleop + +.. toctree:: + :maxdepth: 2 + :caption: Reference Documentation + + references/index + references/deployment_code + references/observation_config + references/motion_reference + references/planner_onnx + references/jetpack6 + references/decoupled_wbc + + +.. toctree:: + :maxdepth: 1 + :caption: Additional Resources + + resources/citations + resources/license + resources/support +.. 
resources/contributing + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/source/references/decoupled_wbc.md b/docs/source/references/decoupled_wbc.md new file mode 100644 index 0000000..41b93cb --- /dev/null +++ b/docs/source/references/decoupled_wbc.md @@ -0,0 +1,155 @@ +# Decoupled WBC (N1.5 / N1.6) + +Software stack for loco-manipulation experiments across multiple humanoid platforms, with primary support for the Unitree G1. This repository provides whole-body control policies, a teleoperation stack, and a data exporter. + +--- + +## System Installation + +### Prerequisites +- Ubuntu 22.04 +- NVIDIA GPU with a recent driver +- Docker and NVIDIA Container Toolkit (required for GPU access inside the container) + +### Repository Setup + +Install Git and Git LFS: + +```bash +sudo apt update +sudo apt install git git-lfs +git lfs install +``` + +Clone the repository: + +```bash +mkdir -p ~/Projects +cd ~/Projects +git clone https://github.com/NVlabs/GR00T-WholeBodyControl.git +cd decoupled_wbc +``` + +### Docker Environment + +We provide a Docker image with all dependencies pre-installed. + +Install a fresh image and start a container: + +```bash +./docker/run_docker.sh --install --root +``` + +This pulls the latest `decoupled_wbc` image from `docker.io/nvgear`. + +Start or re-enter a container: + +```bash +./docker/run_docker.sh --root +``` + +Use `--root` to run as the `root` user. To run as a normal user, build the image locally: + +```bash +./docker/run_docker.sh --build +``` + +--- + +## Running the Control Stack + +Once inside the container, the control policies can be launched directly. 
+ +- Simulation: + +```bash +python decoupled_wbc/control/main/teleop/run_g1_control_loop.py +``` + +- Real robot: Ensure the host machine network is configured per the [G1 SDK Development Guide](https://support.unitree.com/home/en/G1_developer) and set a static IP at `192.168.123.222`, subnet mask `255.255.255.0`: + +```bash +python decoupled_wbc/control/main/teleop/run_g1_control_loop.py --interface real +``` + +Keyboard shortcuts (terminal window): +- `]`: Activate policy +- `o`: Deactivate policy +- `9`: Release / Hold the robot +- `w` / `s`: Move forward / backward +- `a` / `d`: Strafe left / right +- `q` / `e`: Rotate left / right +- `z`: Zero navigation commands +- `1` / `2`: Raise / lower the base height +- `backspace` (viewer): Reset the robot in the visualizer + +--- + +## Running the Teleoperation Stack + +The teleoperation policy primarily uses Pico controllers for coordinated hand and body control. It also supports other teleoperation devices, including LeapMotion and HTC Vive with Nintendo Switch Joy-Con controllers. + +Keep `run_g1_control_loop.py` running, and in another terminal run: + +```bash +python decoupled_wbc/control/main/teleop/run_teleop_policy_loop.py --hand_control_device=pico --body_control_device=pico +``` + +### Pico Setup and Controls + +Configure the teleop app on your Pico headset by following the [XR Robotics guidelines](https://github.com/XR-Robotics). + +The necessary PC software is pre-installed in the Docker container. Only the [XRoboToolkit-PC-Service](https://github.com/XR-Robotics/XRoboToolkit-PC-Service) component is needed. + +Prerequisites: Connect the Pico to the same network as the host computer. 
+ +Controller bindings: +- `menu + left trigger`: Toggle lower-body policy +- `menu + right trigger`: Toggle upper-body policy +- `Left stick`: X/Y translation +- `Right stick`: Yaw rotation +- `L/R triggers`: Control hand grippers + +Pico unit test: + +```bash +python decoupled_wbc/control/teleop/streamers/pico_streamer.py +``` + +--- + +## Running the Data Collection Stack + +Run the full stack (control loop, teleop policy, and camera forwarder) via the deployment helper: + +```bash +python decoupled_wbc/scripts/deploy_g1.py \ + --interface sim \ + --camera_host localhost \ + --sim_in_single_process \ + --simulator robocasa \ + --image-publish \ + --enable-offscreen \ + --env_name PnPBottle \ + --hand_control_device=pico \ + --body_control_device=pico +``` + +The `tmux` session `g1_deployment` is created with panes for: +- `control_data_teleop`: Main control loop, data collection, and teleoperation policy +- `camera`: Camera forwarder +- `camera_viewer`: Optional live camera feed + +Operations in the `controller` window (`control_data_teleop` pane, left): +- `]`: Activate policy +- `o`: Deactivate policy +- `k`: Reset the simulation and policies +- `` ` ``: Terminate the tmux session +- `ctrl + d`: Exit the shell in the pane + +Operations in the `data exporter` window (`control_data_teleop` pane, right top): +- Enter the task prompt + +Operations on Pico controllers: +- `A`: Start/Stop recording +- `B`: Discard trajectory diff --git a/docs/source/references/deployment_code.md b/docs/source/references/deployment_code.md new file mode 100644 index 0000000..c8f3e9f --- /dev/null +++ b/docs/source/references/deployment_code.md @@ -0,0 +1,276 @@ +# C++ Deployment Program Flow + +This document describes how the main executables run, their arguments, and the logging/configuration options. 
+ +## Program Pipeline + +High-level flow (matches current code): +- Input interfaces: `keyboard | gamepad | gamepad_manager | zmq | zmq_manager | ros2 | manager` +- Optional planner (when enabled) generates target animations +- Motion reader provides reference motions for non-planner mode +- Policy inference (TensorRT; optional encoder → decoder) +- Output publishing via `--output-type <type>` + +## Available Commands + +```sh +just build # Build main project +just clean # Clean build artifacts +just --list # Show all available commands +``` + +## Run + +### Frequency Test + +Load an ONNX model and print input/output info. This is a sanity check for model loading; the reported frequency is not TensorRT inference speed. + +```sh +# Basic usage with default settings (1000 iterations, random data) +just run freq_test policy/example/model_step_000000.onnx + +# Custom iterations and data mode +just run freq_test policy/example/model_step_000000.onnx 5000 random +``` + +**Usage:** `just run freq_test <model_file> [iterations] [data_mode]` +- `model_file`: Path to ONNX model file (required) +- `iterations`: Number of inference iterations (default: 1000) +- `data_mode`: Input data type — `zeros|random|ones` (default: random) + +### Policy Deployment + +Deploy ONNX policy on G1 robot with motion reference control: + +```sh +# Example command (real robot) +just run g1_deploy_onnx_ref enP8p1s0 policy/release/model_decoder.onnx reference/example/ \ + --obs-config policy/release/observation_config.yaml \ + --encoder-file policy/release/model_encoder.onnx \ + --planner-file planner/target_vel/V2/planner_sonic.onnx \ + --input-type manager \ + --enable-motion-recording \ + --enable-csv-logs + +# MuJoCo simulation (disables CRC validation) +python ../gear_sonic/scripts/run_sim_loop.py +just run g1_deploy_onnx_ref lo policy/release/model_decoder.onnx reference/example/ \ + --obs-config policy/release/observation_config.yaml \ + --encoder-file policy/release/model_encoder.onnx \ + --planner-file 
planner/target_vel/V2/planner_sonic.onnx \ + --input-type manager \ + --enable-motion-recording \ + --enable-csv-logs \ + --disable-crc-check +``` + +**Usage:** `just run g1_deploy_onnx_ref <network_interface> <model_file> <motion_data_path> [options...]` + +**Required Arguments:** +- `network_interface`: Network interface for DDS communication (e.g., `eth0`, `enp5s0`, `enP8p1s0`, `lo`) +- `model_file`: Path to ONNX policy model file +- `motion_data_path`: Path to motion data directory containing reference motions + +**Optional Arguments:** + +**Model Configuration:** +- `--obs-config <file>`: Path to observation configuration YAML file +- `--encoder-file <file>`: Path to ONNX encoder model file (optional, for token-based policies) +- `--planner-file <file>`: Path to ONNX planner model file (required for ROS2, `gamepad_manager`, and `zmq_manager` planner mode) +- `--planner-precision <16|32>`: Floating point precision for planner (default: 32) +- `--policy-precision <16|32>`: Floating point precision for policy (default: 32) + +**Output Mode:** +- `--output-type <type>`: Output interface for publishing control results + - `zmq` — Publish via ZMQ (default) + - `ros2` — Publish via ROS2 (only if built with ROS2 support) + - `all` — Create all available output interfaces simultaneously + +**Input Mode:** +- `--input-type <type>`: Input interface type (default: `keyboard`) + - `keyboard` — Direct keyboard input + - `gamepad` — Wireless controller + - `gamepad_manager` — Gamepad + quick switching to ZMQ/ROS2 + - `zmq` — Network motion streaming + - `zmq_manager` — Dynamic switching between planner and network motion streaming + - `manager` — Dynamic switching between keyboard, gamepad, ZMQ, and ROS2 + - `ros2` — ROS2 topic control (requires planner, only if built with ROS2 support) + +**ZMQ Configuration (when using `--input-type zmq`, `zmq_manager`, `gamepad_manager`, or `manager`):** +- `--zmq-host <host>`: ZMQ server host (default: `localhost`) +- `--zmq-port <port>`: ZMQ server port (default: `5556`) +- `--zmq-topic <topic>`: ZMQ topic/prefix (default: `pose`) 
+- `--zmq-conflate`: Enable ZMQ CONFLATE mode +- `--zmq-verbose`: Enable verbose ZMQ subscriber logging +- `--zmq-out-port <port>`: Port to which control results will be published when using `--output-type zmq` (default: `5557`) +- `--zmq-out-topic <topic>`: Topic to which control results will be published when using `--output-type zmq` (default: `g1_debug`) + +**Simulation:** +- `--disable-crc-check`: Disable CRC validation (required for MuJoCo simulation) + +**Hand & Compliance Control:** +- `--set-compliance <values>`: Set initial VR 3-point compliance (0.01 = rigid, 0.5 = compliant; default: `0.5,0.5,0.0`). Can specify 1 value (applied to both hands) or 3 comma-separated values (`left_wrist,right_wrist,head`). Runtime keyboard controls: `g/h` = left hand ±0.1, `b/v` = right hand ±0.1. +- `--max-close-ratio <ratio>`: Set initial hand max close ratio (0.2–1.0; default: 1.0 = full closure allowed). Runtime keyboard controls: `x/c` = ±0.1. + +**Logging (CLI flags):** +- **Debug / analysis logs (write a single CSV file)**: + - `--target-motion-logfile <file>`: Log the target motion tracked by the controller (visualize with `visualize_motion.py`) + - `--planner-motion-logfile <file>`: Log planner-generated animation sequences + - `--policy-input-logfile <file>`: Log policy input (observation) tensors + - `--record-input-file <file>`: Record operator control inputs to CSV for later playback + - `--playback-input-file <file>`: Play back previously recorded control inputs from CSV +- **State CSV logs (write a timestamped directory)**: + - `--logs-dir <dir>`: Base directory for state CSV logs (default: `logs/dd-mm-yy/hh-mm-ss`) + - `--enable-csv-logs`: Enable robot state CSV logging (default: OFF) + - `--enable-motion-recording`: Record the active motion stream(s) to `reference/recorded_motion/...` (default: OFF) + +## Logging (Details) + +The system provides multiple logging capabilities for debugging, analysis, and replay. 
+ +### Motion Logging + +**Target Motion (`--target-motion-logfile <file>`):** +- Logs the motion the controller is tracking each control frame (~50 Hz) +- CSV columns: `pos_x, pos_y, pos_z, rot_qw, rot_qx, rot_qy, rot_qz, dof_0, dof_1, ... dof_28` + - Global position (xyz) + - Global rotation quaternion (w, x, y, z) + - 29 joint angles (DoF) + +**Planner Motion (`--planner-motion-logfile <file>`):** +- Logs animation sequences generated by the planner (~10 Hz planning updates) +- Each planner update produces a short sequence (e.g., ~100 frames) that is appended to the CSV +- Same CSV format as target motion +- Contains motion blending and replanning results + +**Motion Recording (`--enable-motion-recording`):** +- Automatically records the currently active motion stream(s) into timestamped folders under `reference/recorded_motion/YYYYMMDD/` + - **Streamed motion** (ZMQ pose topic): saved as `streamed_HHMMSS/` + - **Planner motion** (planner-generated sequence): saved as `planner_motion_HHMMSS/` +- Each recording folder contains `joint_pos.csv`, `joint_vel.csv`, `body_pos.csv`, `body_quat.csv`, etc. +- Useful for offline inspection / regression comparisons of closed-loop behavior + +### Visualization + +All motion CSV files (logged data and reference motions) can be visualized using the `visualize_motion.py` script: + +```sh +# Visualize logged motion data (single CSV file) +python visualize_motion.py --csv_path target_motion.csv + +# Visualize reference motion from motion data directory +python visualize_motion.py --motion_dir reference/example/high_jump_full_turn/ +``` + +The visualizer script can connect to a running `g1_deploy` executable to visualize target/measured robot motions in real time: + +```sh +python visualize_motion.py --realtime_debug_url tcp://localhost:5557 +``` + +This displays three G1 robots: target animation (colored), target with zero translation (green), and measured sensor data (red). 
+ +**Configuration:** +- Default port: 5557 (change with `--zmq-out-port <port>`) +- Default topic: `g1_debug` (change with `--zmq-out-topic <topic>` on executable, `--realtime_debug_topic <topic>` on visualizer) +- For physical robots, replace `localhost` with the robot's IP address + +**Playback Controls:** +- **Space**: Pause/resume playback +- **`.`** (period): Step forward one frame +- **`,`** (comma): Step backward one frame +- **`r`**: Reset to frame 0 + +### Policy Input Logging + +**Policy Input (`--policy-input-logfile <file>`):** +- Logs the raw observation tensor fed to the neural network policy +- Output: a single CSV file (one row per control step, all observation values) +- Useful for debugging observation configuration and input drift + +### Control Input Recording/Playback + +**Recording (`--record-input-file <file>`):** +- Records control inputs (motion index, frame, operator state, planner state, movement commands) +- Logging starts when the control system is activated +- Tip: Wait a few seconds after lowering from gantry before starting control to give yourself setup time during playback + +**Playback (`--playback-input-file <file>`):** +- Replays recorded control inputs for reproducible experiments +- Playback starts when the control system is activated +- Useful for testing policy changes with identical inputs + +### Robot State CSV Logger + +When enabled with `--enable-csv-logs`, the system logs detailed robot state at each control step (50 Hz). 
+ +**Output Directory:** +- Default: `logs/dd-mm-yy/hh-mm-ss` (auto-generated timestamp) +- Custom: Use `--logs-dir <dir>` to specify directory + +**Files Generated (split by signal type):** +- `base_quat.csv` — Base IMU quaternion (4 values: w, x, y, z) +- `base_ang_vel.csv` — Base angular velocity (3 values: x, y, z) +- `torso_quat.csv` — Torso IMU quaternion (4 values) +- `torso_ang_vel.csv` — Torso angular velocity (3 values) +- `q.csv` — Joint positions (29 joints) +- `dq.csv` — Joint velocities (29 joints) +- `action.csv` — Policy actions (29 joints) + +**CSV Format:** +- Columns: `index,time_ms,...` +- `time_ms`: Milliseconds since first log (0.0 at start, fractional allowed) +- Synchronized across all files using the same index/timestamp + +**Example:** + +```sh +just run g1_deploy_onnx_ref enp5s0 policy/model.onnx reference/motions/ \ + --obs-config policy/obs_config.yaml \ + --enable-csv-logs \ + --logs-dir logs/my_experiment +``` + +## Observation Configuration + +The system uses YAML configuration files to define which observations are fed to the policy. This allows flexible policy designs without code changes. + +**Basic Structure (`--obs-config <file>`):** + +```yaml +observations: + - name: "body_joint_positions" + enabled: true + - name: "base_angular_velocity" + enabled: true + # ... other observations +``` + +**With Encoder (Token-Based Policies):** + +For policies that use encoded tokens, add an `encoder:` section: + +```yaml +observations: + - name: "token_state" # Encoder outputs (64-dim tokens) + enabled: true + - name: "base_angular_velocity" # Direct observations + enabled: true + +encoder: + dimension: 64 # Token output dimension + use_fp16: false # TensorRT precision (optional) + encoder_observations: + - name: "motion_joint_positions_10frame_step5" + enabled: true + # ... observations fed to encoder +``` + +Then run with `--encoder-file <path>` to load the encoder model. If omitted, tokens can be set externally via ROS2/ZMQ. 
+ +**Complete Observation Reference:** + +For the full list of all available observation names, dimensions, and example configurations, see [Observation Configuration](observation_config.md). + +**Examples:** +- See `policy/observation_config_example.yaml` diff --git a/docs/source/references/index.md b/docs/source/references/index.md new file mode 100644 index 0000000..5fb071d --- /dev/null +++ b/docs/source/references/index.md @@ -0,0 +1,10 @@ +# Reference Documentation + +These pages contain the original standalone documentation that previously lived as individual `README_*.md` files. They are preserved here as detailed references. + +- [Deployment Code & Program Flow](deployment_code.md) — CLI arguments, logging, observation config, and visualization tools +- [Observation Configuration](observation_config.md) — YAML config format, all observation types with dimensions, and how to create custom observations +- [Motion Reference Data](motion_reference.md) — reference motion file format, conversion, verification, and usage +- [Kinematic Planner ONNX Model](planner_onnx.md) — detailed input/output specification for the ONNX-exported kinematic planner +- [JetPack 6 Flashing Guide](jetpack6.md) — flash the Orin NX on the Unitree G1 +- [Decoupled WBC (N1.5 / N1.6)](decoupled_wbc.md) — the earlier Decoupled WBC stack used in Gr00t N1.5 and N1.6 diff --git a/docs/source/references/jetpack6.md b/docs/source/references/jetpack6.md new file mode 100644 index 0000000..2b301c9 --- /dev/null +++ b/docs/source/references/jetpack6.md @@ -0,0 +1,118 @@ +# G1 JetPack 6 Flashing Guide + +## 1. Download Image + +1. **Download the required files** — get both the `.tar` file and the image file from [Jetpack 6.2](https://drive.google.com/drive/folders/1ho17ectOxi7FbaRFdpAbP4tet8BJWjbm). + +## 2. Unmount Orin NX's NVMe + +### Steps to Remove the NVMe SSD + +1. 
**Remove the back handle screws** + + Use a 5 mm T-handle Allen key to unscrew the two screws located at the back of the robot near the handle. + +2. **Remove the foam and plastic back cover** + + - Use the 2 mm hex tool from the Fanttik tool kit to remove the four screws holding the foam and plastic backing in place. + - Lift the backing off to expose the internal components. + +```{image} ../_static/screws.png +:width: 600px +:align: center +``` + +3. **Remove the NVMe screw on the Orin NX module** + + Use a Phillips screwdriver (also in the Fanttik tool kit) to remove the single screw securing the Orin NX's NVMe SSD. + +```{image} ../_static/ssd.png +:width: 600px +:align: center +``` + +4. **Remove the SSD card** + + Carefully slide out and remove the NVMe SSD from its slot. + +## 3. Flash the NVMe SSD + +**Mount the NVMe SSD from the Orin NX into the NVMe SSD enclosure adapter.** +(Adapter needed when burning image from a laptop) + +1. Check that the robot's SSD is unmounted. Run the following command to make sure the external SSD (where you will burn the image) is not mounted: + +```bash +sudo umount /dev/sda* +``` + +2. If the SSD was mounted, this command will safely unmount it so it's ready for imaging. + +3. Navigate to the folder where you have the image (`cd robot_NXUpgrade/`), then run the following command: + +```bash +bzip2 -dc g1-nx-j6.2.img.bz2 | sudo dd of=/dev/sda bs=4M status=progress conv=fsync +``` + +4. After it's done, eject the card with the following commands to safely unplug it: + +```bash +sudo sync +sudo udisksctl power-off -b /dev/sda +``` + +5. **Set the SSD card to the side and proceed with the second part of the flashing process!** + +## 4. Put the Robot Into Flashing Mode + +1. **Power on the G1** and wait until all three power indicator lights remain steadily lit. + +2. **Connect the robot to your laptop/desktop** using a USB-C cable. + +3. **Press and hold both white buttons** on the robot at the same time for two seconds. + +4. 
While still holding them, **release the top white button** and continue holding the **bottom button** for 2 seconds until the **three green lights change to two green lights**. + +```{image} ../_static/flashing.png +:width: 600px +:align: center +``` + +5. When only two lights are on, the robot is **now in flashing mode**. Open a new terminal on your computer and enter `lsusb`. You should see text containing `NVIDIA Corp. APX`. + +6. You can now proceed to run the following commands: + +```bash +sudo tar -xjvf Jetpack_6.2_nx.tar.bz2 +cd Jetpack_6.2_nx/Linux_for_Tegra +sudo ./flash_nx_module.sh +``` + +Wait patiently for about 8 minutes until it shows success. + +## 5. Reassemble the Robot + +1. After the flashing is complete, **power off the robot**. + +2. **Reinstall the Orin NX's NVMe SSD** back into its slot on the G1 robot and secure it with its screw. + +3. **Reattach the foam and plastic backing**, using the same tools you used to remove it. + +4. **Tighten all screws** to ensure the back cover and handle are securely in place. + +5. Turn on `maxn` mode on Jetson Orin using the command: + + +``` +sudo nvpmodel -m 0 +``` + +and use + + +``` +sudo jetson_clocks +sudo jetson_clocks --show +``` + +to check if it is already in MAXN mode. diff --git a/docs/source/references/motion_reference.md b/docs/source/references/motion_reference.md new file mode 100644 index 0000000..8e218fe --- /dev/null +++ b/docs/source/references/motion_reference.md @@ -0,0 +1,295 @@ +# Motion Reference Data + +This page describes the motion reference data format used by the C++ deployment stack, how to create your own reference motions, and how to verify and deploy them. + +The deployment stack plays back pre-loaded **reference motions** — sequences of joint positions, velocities, and full body kinematics that the policy tracks. These motions are stored as CSV files in a structured folder hierarchy. 
You can generate them from any source (motion capture, simulation, retargeting pipeline, etc.) as long as the output matches the format described below. + +--- + +## Folder Structure + +Each motion dataset is a directory containing one subfolder per motion clip. The C++ reader (`MotionDataReader`) auto-discovers all subfolders at startup. + +``` +reference/my_motions/ +├── motion_name_1/ +│ ├── joint_pos.csv # Joint positions +│ ├── joint_vel.csv # Joint velocities +│ ├── body_quat.csv # Body quaternions +│ ├── body_pos.csv # Body positions +│ ├── metadata.txt # Body part indexes +│ ├── body_lin_vel.csv # Body linear velocities +│ ├── body_ang_vel.csv # Body angular velocities +│ ├── smpl_joint.csv # SMPL joint positions +│ ├── smpl_pose.csv # SMPL body poses +│ └── info.txt # Detailed motion information +└── motion_name_2/ + └── ... +``` + +The C++ reader scans the base directory for subfolders, reads each subfolder as one motion, and validates frame-count consistency across all files in that folder. +--- + +## File Formats + +The C++ reader loads whichever files are present and skips missing files gracefully. However, **in practice**, most policies require: +- `joint_pos.csv`, `joint_vel.csv` — for joint-based motion tracking +- `body_quat.csv` — for anchor orientation observations (the control loop will stop if this is missing when gathering observations) +- `body_pos.csv` — for heading computation and VR 3-point observations +- `metadata.txt` — for body part index alignment when body data is present + +A motion must have **at least one valid data source** (joint, body, or SMPL) to load at startup. + +### `joint_pos.csv` + +Joint positions in **IsaacLab order** (29 joints). Each row is one timestep at 50 Hz. The first row is a header. 
+ +| Column | Description | +|--------|-------------| +| `joint_0` … `joint_28` | Joint angles in radians (IsaacLab ordering) | + +Shape: `(timesteps, 29)` + +### `joint_vel.csv` + +Joint velocities in **IsaacLab order** (29 joints). Each row is one timestep at 50 Hz. Frame count must match `joint_pos.csv`. + +| Column | Description | +|--------|-------------| +| `joint_vel_0` … `joint_vel_28` | Joint angular velocities in rad/s (IsaacLab ordering) | + +Shape: `(timesteps, 29)` + +### `body_pos.csv` + +Body part positions in the **world frame**. Each body contributes 3 columns (x, y, z). The number of bodies varies per motion. Needed for heading computation and VR 3-point observations. + +| Column | Description | +|--------|-------------| +| `body_0_x`, `body_0_y`, `body_0_z` | Position of body 0 (root/pelvis) in meters | +| `body_1_x`, `body_1_y`, `body_1_z` | Position of body 1 in meters | +| … | … | + +Shape: `(timesteps, num_bodies * 3)` + +**We assume the root/pelvis is always at column group 0** (the first 3 columns). + +### `body_quat.csv` + +Body part orientations as quaternions in the **world frame**. Each body contributes 4 columns. The quaternion ordering is **(w, x, y, z)**. **Required for most policies** — the `motion_anchor_orientation` observation (used by most policies) will fail and stop the control system if this file is missing. + +| Column | Description | +|--------|-------------| +| `body_0_w`, `body_0_x`, `body_0_y`, `body_0_z` | Quaternion of body 0 (root/pelvis) | +| `body_1_w`, `body_1_x`, `body_1_y`, `body_1_z` | Quaternion of body 1 | +| … | … | + +Shape: `(timesteps, num_bodies * 4)` + +**We assume the root/pelvis is always at column group 0** (the first 4 columns). + +```{note} +The number of bodies in `body_quat.csv` can differ from `body_pos.csv`. The C++ reader tracks them independently (`num_bodies` vs `num_body_quaternions`). However, the root body (first column group) must be present for heading computation to work. 
You can use zero if you don't need root pos. +``` + +### `metadata.txt` + +Contains the **body part indexes** array, which maps each column group in `body_pos.csv` / `body_quat.csv` to the corresponding IsaacLab body index. This is needed when body data is present. + +``` +Metadata for: motion_name +============================== + +Body part indexes: +[ 0 4 10 18 5 11 19 9 16 22 28 17 23 29] + +Total timesteps: 497 +``` + +The C++ reader parses `Body part indexes:` followed by a line of space-separated integers in brackets. For example, `[0, 4, 10, 18, ...]` means column group 0 → IsaacLab body 0 (pelvis/root), column group 1 → body 4, etc. + +For a **root-only** motion (only 1 body), use: + +``` +Body part indexes: +[0] +``` + +### `body_lin_vel.csv` / `body_ang_vel.csv` + +Body part linear and angular velocities in the world frame. Same layout as `body_pos.csv` (3 columns per body). The number of bodies must match `body_pos.csv`. + +### `smpl_joint.csv` + +SMPL joint positions (typically 24 joints × 3 coordinates). Each row is one timestep. + +Shape: `(timesteps, num_smpl_joints * 3)` + +### `smpl_pose.csv` + +SMPL body poses in axis-angle representation (typically 21 poses × 3 coordinates). Each row is one timestep. + +Shape: `(timesteps, num_smpl_poses * 3)` + +```{note} +The **current reference motion tracking pipeline uses joint-based tracking only** (encoder mode 0). To enable SMPL-based reference tracking (encoder mode 2), you would need to modify the code to detect the presence of SMPL data and switch the encoder mode accordingly. +``` + +### `info.txt` + +Human-readable summary with shapes, dtypes, and value ranges. Not read by the C++ stack — purely for documentation. + +--- + +## Creating Your Own Reference Motions + +You can generate reference motions from any source — the only requirement is producing CSV files in the format above. Common approaches: + +1. 
**Motion capture retargeting** — retarget human mocap to the G1 model, export joint positions/velocities and body kinematics. +2. **Simulation recording** — record joint states from an IsaacLab or MuJoCo simulation at 50 Hz. +3. **Procedural generation** — programmatically create joint trajectories. + +### Minimal Files Needed + +The **minimum** set of files to create a working motion for SONIC policy: + +1. **`joint_pos.csv`** — 29 joint positions (IsaacLab order), header + one row per timestep +2. **`joint_vel.csv`** — 29 joint velocities (IsaacLab order), header + one row per timestep +3. **`body_quat.csv`** — Root quaternion (w, x, y, z), header + one row per timestep +4. **`body_pos.csv`** — Root position (x, y, z), header + one row per timestep. You can use all zeros if you don't need position tracking. +5. **`metadata.txt`** — Body part indexes (just `[0]` for root-only) + +**Example files:** + +`joint_pos.csv`: +``` +joint_0,joint_1,joint_2,...,joint_28 +0.128441,0.102713,0.020116,...,0.045231 +0.130124,0.104532,0.021045,...,0.046112 +... +``` + +`joint_vel.csv`: +``` +joint_vel_0,joint_vel_1,...,joint_vel_28 +0.143671,0.143864,...,0.012345 +... +``` + +`body_quat.csv` (root quaternion only): +``` +body_0_w,body_0_x,body_0_y,body_0_z +0.999123,0.000456,0.001234,0.040567 +... +``` + +`body_pos.csv` (root position, can be all zeros): +``` +body_0_x,body_0_y,body_0_z +0.000000,0.000000,0.000000 +... +``` + +`metadata.txt`: +``` +Metadata for: my_motion +============================== + +Body part indexes: +[0] + +Total timesteps: 100 +``` + +This gives you a **root-only** motion (1 body = pelvis/root) that most policies can track. + + +### Provided Conversion Script + +A convenience script `reference/convert_motions.py` is included for converting **joblib pickle** (`.pkl`) files to this format. This is just one possible source — you can use any tool or pipeline that produces the correct CSV output. 
+ +```bash +cd gear_sonic_deploy +python reference/convert_motions.py [output_dir] +``` + +The pickle should be a dictionary where each key is a motion name and each value contains `joint_pos`, `joint_vel`, `body_pos_w`, `body_quat_w`, `body_lin_vel_w`, `body_ang_vel_w`, `_body_indexes`, and `time_step_total`. + +--- + +## Verifying Reference Motions + +### MuJoCo Visualization + +Use the included visualizer to check that the motion looks correct on the G1 model: + +```bash +cd gear_sonic_deploy +python visualize_motion.py --motion_dir reference/my_motions/motion_name_1/ +``` + +**Controls:** +- **Space**: Pause / resume playback +- **R**: Reset to frame 0 +- **,** / **.**: Step backward / forward one frame +- **-** / **=**: Previous / next motion (if multiple loaded) + +Verify that: +- The robot stands upright and does not clip through the floor +- Joint angles look reasonable (no extreme poses) +- The motion plays smoothly without sudden jumps +- Body positions track the expected trajectory + +--- + +## Using Reference Motions + +### With `deploy.sh` + +Pass the motion directory via `--motion-data`: + +```bash +./deploy.sh --motion-data reference/my_motions/ sim +``` + +Or use the default motions (configured in `deploy.sh`): + +```bash +./deploy.sh sim +``` + +### At Runtime + +Once deployed, use the keyboard or gamepad to browse and play motions: + +- **T**: Play current motion +- **N / P**: Next / Previous motion +- **R**: Restart from frame 0 + +See the [Keyboard tutorial](../tutorials/keyboard.md) for the full control reference. + +--- + +## Validation Rules + +The C++ reader enforces the following during loading: + +- **Frame count consistency**: All CSV files within a motion folder must have the same number of rows (excluding headers). Mismatches cause the motion to be skipped with an error. +- **Joint count consistency**: `joint_pos.csv` and `joint_vel.csv` must have the same number of columns. 
+- **Body count consistency**: `body_lin_vel.csv` and `body_ang_vel.csv` must have the same number of body columns as `body_pos.csv`. +- **At least one data source**: A motion must have at least some valid data (joint, body, or SMPL) to be loaded. +- **Metadata parsing**: The `metadata.txt` file must contain a `Body part indexes:` line followed by a bracketed list of integers for the motion to have correct body-part alignment. + +If a motion fails validation, it is skipped and a warning is printed. The deployment continues with the remaining valid motions. + +--- + +## Notes + +- All data is at **50 Hz** (0.02 s per timestep), matching the control loop frequency. +- Joint ordering follows **IsaacLab convention** (not MuJoCo). The C++ stack handles the conversion internally when sending motor commands. +- Body quaternions use **(w, x, y, z)** ordering. +- The first body (column group 0) must correspond to the root/pelvis for heading computation and anchor orientation observations to work correctly. +- While the C++ reader can load motions without `body_quat.csv`, the control loop will fail during observation gathering if the policy observes `motion_anchor_orientation` (which most policies do). +- CSV files must have a **header row** as the first line — the C++ reader skips the first line of every CSV. +- Values are parsed as `double` precision internally. diff --git a/docs/source/references/observation_config.md b/docs/source/references/observation_config.md new file mode 100644 index 0000000..1feff2c --- /dev/null +++ b/docs/source/references/observation_config.md @@ -0,0 +1,431 @@ +# Observation Configuration + +This page is the complete reference for configuring observations in the deployment system. It covers the YAML configuration format, the encoder system, every available observation type, and how to create your own custom observations. + +(obs-config-format)= +## Configuration Format + +Observations are configured via a YAML file passed with `--obs-config `. 
Each observation has a `name` (must match a registered observation) and an `enabled` flag. + +### Basic Structure + +```yaml +observations: + - name: "motion_joint_positions" + enabled: true + - name: "motion_joint_velocities" + enabled: true + - name: "motion_anchor_orientation" + enabled: true + - name: "base_angular_velocity" + enabled: true + - name: "body_joint_positions" + enabled: true + - name: "body_joint_velocities" + enabled: true + - name: "last_actions" + enabled: true +``` + +**Key rules:** + +- Observations are concatenated **in the order listed** to form the policy input vector. +- Offsets are calculated automatically — no manual offset management needed. +- The **total dimension** of all enabled observations must match your ONNX model's input size. +- Disabled observations (`enabled: false`) are skipped entirely. +- Reordering entries changes the layout of the input tensor (offsets shift accordingly). + +(obs-config-encoder)= +### With Encoder (Token-Based Policies) + +For policies that use an encoder to compress observations into a compact token, add an `encoder:` section: + +```yaml +observations: + - name: "token_state" # Encoder outputs (dimension set below) + enabled: true + - name: "base_angular_velocity" # Direct observations + enabled: true + - name: "body_joint_positions" + enabled: true + - name: "body_joint_velocities" + enabled: true + - name: "last_actions" + enabled: true + +encoder: + dimension: 64 # Token output dimension + use_fp16: false # TensorRT precision for encoder (optional) + encoder_observations: + - name: "motion_joint_positions_10frame_step5" + enabled: true + - name: "motion_joint_velocities_10frame_step5" + enabled: true + - name: "motion_anchor_orientation_10frame_step5" + enabled: true + - name: "motion_root_z_position_10frame_step5" + enabled: true + encoder_modes: # Optional: per-mode observation requirements + - name: "g1" + mode_id: 0 + required_observations: + - motion_joint_positions_10frame_step5 + - 
motion_joint_velocities_10frame_step5 + - motion_anchor_orientation_10frame_step5 + - motion_root_z_position_10frame_step5 +``` + +**Encoder fields:** + +| Field | Description | +|---|---| +| `dimension` | Token output dimension (must match encoder ONNX model output). Set to 0 or omit to disable encoder. | +| `use_fp16` | Use FP16 precision for encoder TensorRT engine (default: false). | +| `encoder_observations` | Observations fed to the encoder (superset of all modes). Same name/enabled format as policy observations. | +| `encoder_modes` | *(Optional)* Per-mode observation requirements. Observations not in a mode's `required_observations` are zero-filled, saving computation. | + +Run with `--encoder-file ` to load the encoder model. If omitted, `token_state` can be set externally via ROS2/ZMQ. + +See `policy/observation_config_example.yaml` for a complete annotated example. + +### Naming Convention + +Multi-frame observations follow the pattern: `{base_name}_{N}frame_step{S}` + +- **N** = number of frames gathered (temporal window size) +- **S** = step size between frames (in control ticks at 50 Hz, so step5 = 0.1 s apart) +- Without the suffix = single current frame only + +For example, `motion_joint_positions_10frame_step5` gathers 10 frames of joint positions, sampled every 5 ticks (0.1 s), giving a 0.9 s look-ahead window. If future frames exceed the motion length, the last frame is repeated. + +--- + +## Encoder & Token Observations + +These observations relate to the encoder (tokenizer) system. See [With Encoder](obs-config-encoder) above for the YAML format. + +| Name | Dim | Description | +|---|---|---| +| `token_state` | config | Encoder output tokens (dimension set by `encoder.dimension` in YAML). Populated by local encoder inference or externally via ZMQ/ROS2. | +| `encoder_mode` | 3 | Current encoder mode ID + 2 zero-padding values. | +| `encoder_mode_4` | 4 | Current encoder mode ID + 3 zero-padding values. 
| + +--- + +## Motion Reference Observations + +Gathered from the currently-active motion sequence (reference motions, planner output, or ZMQ stream). All joint data uses **IsaacLab joint ordering** (29 joints). + +### Joint Positions (from motion) + +| Name | Dim | Frames | Step | Description | +|---|---|---|---|---| +| `motion_joint_positions` | 29 | 1 | — | Current frame joint positions (rad) | +| `motion_joint_positions_3frame_step1` | 87 | 3 | 1 | 3-frame window, consecutive | +| `motion_joint_positions_5frame_step5` | 145 | 5 | 5 | 5-frame window, 0.1 s apart | +| `motion_joint_positions_10frame_step1` | 290 | 10 | 1 | 10-frame window, consecutive | +| `motion_joint_positions_10frame_step5` | 290 | 10 | 5 | 10-frame window, 0.1 s apart | +| `motion_joint_positions_lowerbody_10frame_step1` | 120 | 10 | 1 | Lower-body joints only (12 joints), consecutive | +| `motion_joint_positions_lowerbody_10frame_step5` | 120 | 10 | 5 | Lower-body joints only, 0.1 s apart | +| `motion_joint_positions_wrists_10frame_step1` | 60 | 10 | 1 | Wrist joints only (6 joints), consecutive | +| `motion_joint_positions_wrists_2frame_step1` | 12 | 2 | 1 | Wrist joints only, 2 consecutive frames | + +```{note} +When upper-body control is active (e.g., via ZMQ/ROS2 teleoperation), the upper-body joint positions in these observations are replaced with the externally-provided targets. +``` + +### Joint Velocities (from motion) + +| Name | Dim | Frames | Step | Description | +|---|---|---|---|---| +| `motion_joint_velocities` | 29 | 1 | — | Current frame joint velocities (rad/s). Zero when not playing. 
| +| `motion_joint_velocities_3frame_step1` | 87 | 3 | 1 | 3-frame window, consecutive | +| `motion_joint_velocities_5frame_step5` | 145 | 5 | 5 | 5-frame window, 0.1 s apart | +| `motion_joint_velocities_10frame_step1` | 290 | 10 | 1 | 10-frame window, consecutive | +| `motion_joint_velocities_10frame_step5` | 290 | 10 | 5 | 10-frame window, 0.1 s apart | +| `motion_joint_velocities_lowerbody_10frame_step1` | 120 | 10 | 1 | Lower-body joints only, consecutive | +| `motion_joint_velocities_lowerbody_10frame_step5` | 120 | 10 | 5 | Lower-body joints only, 0.1 s apart | +| `motion_joint_velocities_wrists_10frame_step1` | 60 | 10 | 1 | Wrist joints only, consecutive | + +### Anchor Orientation (from motion) + +Heading-corrected relative rotation from the robot's current base orientation to the reference motion orientation. Output is the first two columns of the 3×3 rotation matrix (6 values per frame). + +| Name | Dim | Frames | Step | Description | +|---|---|---|---|---| +| `motion_anchor_orientation` | 6 | 1 | — | Current frame anchor orientation | +| `motion_anchor_orientation_10frame_step1` | 60 | 10 | 1 | 10-frame window, consecutive | +| `motion_anchor_orientation_10frame_step5` | 60 | 10 | 5 | 10-frame window, 0.1 s apart | + +### Root Z Position (from motion) + +| Name | Dim | Frames | Step | Description | +|---|---|---|---|---| +| `motion_root_z_position` | 1 | 1 | — | Current frame root height (m) | +| `motion_root_z_position_3frame_step1` | 3 | 3 | 1 | 3-frame window, consecutive | +| `motion_root_z_position_10frame_step1` | 10 | 10 | 1 | 10-frame window, consecutive | +| `motion_root_z_position_10frame_step5` | 10 | 10 | 5 | 10-frame window, 0.1 s apart | + +--- + +## SMPL Observations + +Gathered from SMPL data in the motion sequence (optional — requires motions with `smpl_joint.csv` / `smpl_pose.csv`). + +### SMPL Joint Positions + +3D positions per SMPL joint (24 joints × 3 = 72 per frame). 
+ +| Name | Dim | Frames | Step | Description | +|---|---|---|---|---| +| `smpl_joints` | 72 | 1 | — | Current frame, all 24 SMPL joints | +| `smpl_joints_2frame_step1` | 144 | 2 | 1 | 2 consecutive frames | +| `smpl_joints_5frame_step5` | 360 | 5 | 5 | 5-frame window, 0.1 s apart | +| `smpl_joints_10frame_step1` | 720 | 10 | 1 | 10-frame window, consecutive | +| `smpl_joints_10frame_step5` | 720 | 10 | 5 | 10-frame window, 0.1 s apart | +| `smpl_joints_lower_10frame_step1` | 270 | 10 | 1 | Lower-body SMPL joints only (9 joints), consecutive | + +### SMPL Poses (Axis-Angle) + +3D axis-angle per SMPL body part (21 poses × 3 = 63 per frame). + +| Name | Dim | Frames | Step | Description | +|---|---|---|---|---| +| `smpl_pose` | 63 | 1 | — | Current frame, all 21 SMPL poses | +| `smpl_pose_5frame_step5` | 315 | 5 | 5 | 5-frame window, 0.1 s apart | +| `smpl_pose_10frame_step1` | 630 | 10 | 1 | 10-frame window, consecutive | +| `smpl_pose_10frame_step5` | 630 | 10 | 5 | 10-frame window, 0.1 s apart | +| `smpl_elbow_wrist_poses_10frame_step1` | 120 | 10 | 1 | Elbow + wrist poses only (4 parts), consecutive | + +### SMPL Aliases + +These use the same gatherers as the motion observations but are intended for SMPL-based policies: + +| Name | Dim | Frames | Step | Description | +|---|---|---|---|---| +| `smpl_root_z_10frame_step1` | 10 | 10 | 1 | Root height, 10 consecutive frames | +| `smpl_anchor_orientation_10frame_step1` | 60 | 10 | 1 | Anchor orientation, 10 consecutive frames | +| `smpl_anchor_orientation_2frame_step1` | 12 | 2 | 1 | Anchor orientation, 2 consecutive frames | + +--- + +## VR Tracking Observations + +VR 3-point and 5-point tracking data. When an external source (ZMQ/ROS2) provides VR data, buffered values are used directly. Otherwise, positions and orientations are computed from the motion sequence's body data and normalised to the root body frame. 
+ +### VR 3-Point + +| Name | Dim | Description | +|---|---|---| +| `vr_3point_local_target` | 9 | 3-point positions in root frame: `[left_wrist xyz, right_wrist xyz, head xyz]` | +| `vr_3point_local_target_compliant` | 9 | Same as above (identical during teleoperation) | +| `vr_3point_local_orn_target` | 12 | 3-point orientations in root frame: `[left quat wxyz, right quat wxyz, head quat wxyz]` | +| `vr_3point_compliance` | 3 | Compliance values: `[left_arm, right_arm, head]`. Keyboard-controlled (g/h/b/v keys), range [0.0, 0.5]. | + +### VR 5-Point + +| Name | Dim | Description | +|---|---|---| +| `vr_5point_local_target` | 15 | 5-point positions in root frame: `[left_wrist, right_wrist, head, left_ankle, right_ankle]` × xyz | +| `vr_5point_local_orn_target` | 20 | 5-point orientations in root frame: 5 quaternions × wxyz | + +--- + +## Robot State History Observations + +Gathered from the StateLogger ring buffer (measured sensor data from the real robot). These provide temporal context by sampling past states. 
+ +### Single-Frame (Current State) + +| Name | Dim | Description | +|---|---|---| +| `base_angular_velocity` | 3 | IMU angular velocity (rad/s): `[roll_rate, pitch_rate, yaw_rate]` | +| `body_joint_positions` | 29 | Current joint positions from encoders (rad, IsaacLab order) | +| `body_joint_velocities` | 29 | Current joint velocities from encoders (rad/s, IsaacLab order) | +| `last_actions` | 29 | Previous policy output (normalised action values) | +| `gravity_dir` | 3 | Gravity direction in body frame (computed from base IMU quaternion) | + +### Multi-Frame History (4 frames, step 1) + +| Name | Dim | Description | +|---|---|---| +| `his_body_joint_positions_4frame_step1` | 116 | Joint positions: 4 consecutive ticks (29 × 4) | +| `his_body_joint_velocities_4frame_step1` | 116 | Joint velocities: 4 consecutive ticks | +| `his_last_actions_4frame_step1` | 116 | Past actions: 4 consecutive ticks | +| `his_base_angular_velocity_4frame_step1` | 12 | Angular velocity: 4 consecutive ticks (3 × 4) | +| `his_gravity_dir_4frame_step1` | 12 | Gravity direction: 4 consecutive ticks | + +### Multi-Frame History (10 frames, step 1) + +| Name | Dim | Description | +|---|---|---| +| `his_body_joint_positions_10frame_step1` | 290 | Joint positions: 10 consecutive ticks (29 × 10) | +| `his_body_joint_velocities_10frame_step1` | 290 | Joint velocities: 10 consecutive ticks | +| `his_last_actions_10frame_step1` | 290 | Past actions: 10 consecutive ticks | +| `his_base_angular_velocity_10frame_step1` | 30 | Angular velocity: 10 consecutive ticks (3 × 10) | +| `his_gravity_dir_10frame_step1` | 30 | Gravity direction: 10 consecutive ticks | + +--- + +## Creating Custom Observations + +You can add your own observation types by modifying the C++ source. The observation system is built around a **registry pattern** — you write a gatherer function, register it with a name and dimension, and then use that name in your YAML config. 
+ +All observation code lives in `gear_sonic_deploy/src/g1/g1_deploy_onnx_ref/src/g1_deploy_onnx_ref.cpp` inside the `G1Deploy` class. + +### Step 1: Write a Gatherer Function + +A gatherer function reads from internal state (sensor data, motion data, etc.) and writes its output into a target buffer at a given offset. The signature is: + +```cpp +bool MyObservation(std::vector<float>& target_buffer, size_t offset) { + // Write your observation values into target_buffer starting at offset. + // Return true on success, false on failure (will stop the control loop). +} +``` + +**Available data sources inside G1Deploy** (see member variables in `g1_deploy_onnx_ref.cpp` for the full list): + +| Source | Description | +|---|---| +| `state_logger_` | Ring buffer of past robot states — IMU, joints, velocities, actions, hand states, token state | +| `current_motion_` / `current_frame_` | Currently-active motion sequence and playback cursor | +| `operator_state` | Operator control flags (`.play`, `.start`, `.stop`) | +| `vr_*_buffer_`, `left_hand_joint_buffer_`, etc. | Buffered input interface data — VR tracking, hand joints, compliance, upper-body targets | +| `heading_state_buffer_`, `movement_state_buffer_` | Thread-safe buffers for heading and planner movement commands | + +**Example** — a custom observation that outputs the torso IMU angular velocity (3 values): + +```cpp +bool GatherTorsoAngularVelocity(std::vector<float>& target_buffer, size_t offset) { + if (!state_logger_) { return false; } + + auto hist = state_logger_->GetLatest(1); + if (hist.empty()) { return false; } + + const auto& entry = hist[0]; + target_buffer[offset + 0] = entry.body_torso_ang_vel[0]; + target_buffer[offset + 1] = entry.body_torso_ang_vel[1]; + target_buffer[offset + 2] = entry.body_torso_ang_vel[2]; + return true; +} +``` + +### Step 2: Register in the Observation Registry + +Add your observation to the `GetObservationRegistry()` method in `g1_deploy_onnx_ref.cpp`. 
Each entry is a tuple of `{name, dimension, gatherer_lambda}`: + +```cpp +std::vector GetObservationRegistry() { + return { + // ... existing observations ... + + // Your custom observation: + {"torso_angular_velocity", 3, + [this](std::vector<float>& buf, size_t offset) { + return GatherTorsoAngularVelocity(buf, offset); + }}, + }; +} +``` + +The **name** is the string you'll use in the YAML config. The **dimension** must be exact — the system validates that the total of all enabled observations matches the ONNX model input size. + +### Step 3: Use in YAML Config + +Once registered, your observation is available like any built-in one: + +```yaml +observations: + - name: "torso_angular_velocity" + enabled: true + # ... other observations ... +``` + +### Tips + +- **Dimension must be fixed.** The observation dimension is set at registration time and cannot change at runtime. If you need variable-size data, pad to a fixed maximum. +- **Don't allocate in the hot path.** Gatherer functions run at 50 Hz in the control loop. Avoid `new`, `malloc`, or resizing vectors. Pre-allocate buffers in the constructor or use stack arrays. +- **Return `false` carefully.** Returning `false` from a gatherer stops the entire control loop. Only return `false` for unrecoverable errors. For missing optional data, write zeros and return `true`. +- **Thread safety.** Gatherers run on the control thread. Reading from `state_logger_` and `DataBuffer` objects is thread-safe. Accessing `current_motion_` and `current_frame_` is protected by `current_motion_mutex_` (already held when `GatherObservations()` is called). +- **Multi-frame pattern.** If your observation needs temporal windows, follow the existing `GatherHis*` or `GatherMotion*MultiFrame` patterns — they accept `num_frames` and `step_size` parameters and register multiple variants (e.g., `my_obs`, `my_obs_4frame_step1`, `my_obs_10frame_step5`). +- **Encoder observations.** Custom observations can also be used as encoder inputs. 
Register them in the same registry — they'll be available for both `observations:` and `encoder_observations:` in the YAML config. +- **Rebuild after changes.** After modifying the C++ source, rebuild with `just build` from the `gear_sonic_deploy/` directory. + +--- + +## Example Configurations + +### Minimal (154D — default policy) + +```yaml +observations: + - name: "motion_joint_positions" # 29D + enabled: true + - name: "motion_joint_velocities" # 29D + enabled: true + - name: "motion_anchor_orientation" # 6D + enabled: true + - name: "base_angular_velocity" # 3D + enabled: true + - name: "body_joint_positions" # 29D + enabled: true + - name: "body_joint_velocities" # 29D + enabled: true + - name: "last_actions" # 29D + enabled: true +# Total: 154D +``` + +### Token-Based Policy with Encoder + +```yaml +observations: + - name: "token_state" # 64D (from encoder) + enabled: true + - name: "base_angular_velocity" # 3D + enabled: true + - name: "body_joint_positions" # 29D + enabled: true + - name: "body_joint_velocities" # 29D + enabled: true + - name: "last_actions" # 29D + enabled: true + +encoder: + dimension: 64 + use_fp16: false + encoder_observations: + - name: "motion_joint_positions_10frame_step5" # 290D + enabled: true + - name: "motion_joint_velocities_10frame_step5" # 290D + enabled: true + - name: "motion_anchor_orientation_10frame_step5" # 60D + enabled: true + - name: "motion_root_z_position_10frame_step5" # 10D + enabled: true +``` + +### VR Teleoperation Policy + +```yaml +observations: + - name: "token_state" # 64D + enabled: true + - name: "vr_3point_local_target" # 9D + enabled: true + - name: "vr_3point_local_orn_target" # 12D + enabled: true + - name: "vr_3point_compliance" # 3D + enabled: true + - name: "base_angular_velocity" # 3D + enabled: true + - name: "body_joint_positions" # 29D + enabled: true + - name: "body_joint_velocities" # 29D + enabled: true + - name: "last_actions" # 29D + enabled: true +``` + +See [Configuration 
Format](obs-config-format) above for YAML syntax details. diff --git a/docs/source/references/planner_onnx.md b/docs/source/references/planner_onnx.md new file mode 100644 index 0000000..d0d2e8c --- /dev/null +++ b/docs/source/references/planner_onnx.md @@ -0,0 +1,493 @@ +# Kinematic Planner ONNX Model Reference + +This page provides a detailed specification of the **Kinematic Planner** ONNX model inputs and outputs. The kinematic planner is the core motion generation component of the GEAR-SONIC system: given the robot's current state and high-level navigation commands, it produces a sequence of future whole-body poses (MuJoCo `qpos` frames) that the low-level whole-body controller then tracks. + +The ONNX model is part of the **C++ inference stack** and is called by the deployment runtime during operation. The C++ stack manages input construction, timing, and state management — certain combinations of inputs are invalid and are handled by the C++ layer to ensure safe operation. This page is intended for developers who want to understand the model interface at a deeper level or build custom integrations beyond the standard deployment pipeline. + +```{admonition} Training Code & Technical Report +:class: note +The kinematic planner training code and technical report will be released soon. This page documents the ONNX model interface for deployment integration. +``` + +--- + +## Overview + +The planner takes **11 input tensors** and produces **2 output tensors**. The 6 primary inputs are listed below; the remaining 5 are advanced inputs managed by the C++ stack and should not need to be modified in most cases. 
+ +**Primary inputs:** + +| Tensor Name | Shape | Dtype | Default | +|-------------|-------|-------|---------| +| `context_mujoco_qpos` | `[1, 4, 36]` | `float32` | Required | +| `target_vel` | `[1]` | `float32` | `-1.0` (use mode default velocity) | +| `mode` | `[1]` | `int64` | Required | +| `movement_direction` | `[1, 3]` | `float32` | Required | +| `facing_direction` | `[1, 3]` | `float32` | Required | +| `height` | `[1]` | `float32` | `-1.0` (disable height control) | + +**Outputs:** + +| Tensor Name | Shape | Dtype | +|-------------|-------|-------| +| `mujoco_qpos` | `[1, N, 36]` | `float32` | +| `num_pred_frames` | scalar | `int64` | + +Where: +- **K** = `max_tokens - min_tokens + 1` (model-dependent; the range of allowed prediction horizons) +- **N** = maximum number of output frames (padded); only the first `num_pred_frames` frames are valid + +--- + +## Coordinate System + +The model operates in **MuJoCo's Z-up coordinate convention**: + +- **X** — forward +- **Y** — left +- **Z** — up + +All position and direction vectors in the inputs and outputs follow this convention. + +--- + +## Input Tensors + +(context_mujoco_qpos)= +### `context_mujoco_qpos` + +| Property | Value | +|----------|-------| +| **Shape** | `[1, 4, 36]` | +| **Dtype** | `float32` | +| **Description** | The planner's context input consisting of 4 consecutive MuJoCo `qpos` frames representing the recent states of the robot | + +This is the primary context input. It provides 4 frames of the robot's recent joint configuration at the simulation framerate. 
+The 36 dimensions of each frame are the standard MuJoCo `qpos` vector for the Unitree G1 (29-DOF) model: + +| Index | Field | Description | +|-------|-------|-------------| +| 0–2 | Root position | `(x, y, z)` in meters, Z-up world frame | +| 3–6 | Root quaternion | `(w, x, y, z)` orientation — MuJoCo convention | +| 7–35 | DOF positions | 29 joint angles in radians, following MuJoCo body tree order | + +```{admonition} Coordinate Frame +:class: note +All inputs — including `context_mujoco_qpos`, `movement_direction`, `facing_direction`, `specific_target_positions`, and `specific_target_headings` — should be provided in the **world coordinate frame**. The root quaternion uses MuJoCo's `(w, x, y, z)` ordering at indices 3 to 6. The model handles canonicalization internally. +``` + +### `target_vel` + +| Property | Value | +|----------|-------| +| **Shape** | `[1]` | +| **Dtype** | `float32` | +| **Description** | Desired locomotion speed override | + +Controls the target movement speed. When set to **zero or below** (e.g., `-1.0`), the model uses the default velocity for the selected mode. When set to a **positive value**, it overrides the mode's default speed (in meters per second). Note that the actual achieved speed may differ from the target due to the critically damped spring model and motion dynamics. + +| Value | Behavior | +|-------|----------| +| `<= 0.0` | Use the default velocity for the selected `mode` | +| `> 0.0` | Override with this target velocity (m/s) | + + +### `mode` + +| Property | Value | +|----------|-------| +| **Shape** | `[1]` | +| **Dtype** | `int64` | +| **Description** | Index selecting the motion style/behavior | + +Selects the motion style from the pre-loaded clip library. The mode index is clamped to the number of available clips at runtime. 
The default planner ships with the following modes: + +**Locomotion set:** + +| Index | Mode | Description | +|-------|------|-------------| +| 0 | `idle` | Standing still | +| 1 | `slowWalk` | Slow forward locomotion | +| 2 | `walk` | Normal walking speed | +| 3 | `run` | Running | + +**Squat / ground set:** + +| Index | Mode | Description | +|-------|------|-------------| +| 4 | `squat` | Squatting — requires `height` input (range ~0.4–0.8m) | +| 5 | `kneelTwoLeg` | Kneeling on both knees — requires `height` input (0.2m-0.4m) | +| 6 | `kneelOneLeg` | Kneeling on one knee — requires `height` input (0.2m-0.4m) | +| 7 | `lyingFacedown` | Lying face down — requires `height` input | +| 8 | `handCrawling` | Crawling on hands and knees | +| 14 | `elbowCrawling` | Crawling on elbows (more likely to overheat) | + +**Boxing set:** + +| Index | Mode | Description | +|-------|------|-------------| +| 9 | `idleBoxing` | Boxing stance (idle) | +| 10 | `walkBoxing` | Walking with boxing guard | +| 11 | `leftJab` | Left jab | +| 12 | `rightJab` | Right jab | +| 13 | `randomPunches` | Random punch sequence | +| 15 | `leftHook` | Left hook | +| 16 | `rightHook` | Right hook | + +**Style walks:** + +| Index | Mode | Description | +|-------|------|-------------| +| 17 | `happy` | Happy walking | +| 18 | `stealth` | Stealthy walking | +| 19 | `injured` | Limping walk | +| 20 | `careful` | Cautious walking | +| 21 | `objectCarrying` | Walking with hands reaching out | +| 22 | `crouch` | Crouched walking | +| 23 | `happyDance` | Dancing walk (only walk forward) | +| 24 | `zombie` | Zombie walk | +| 25 | `point` | Walking with hands pointing | +| 26 | `scared` | Scared walk | + +### `movement_direction` + +| Property | Value | +|----------|-------| +| **Shape** | `[1, 3]` | +| **Dtype** | `float32` | +| **Description** | Desired direction of movement in the MuJoCo world frame | + +A 3D direction vector `(x, y, z)` in the Z-up world coordinate system indicating where the robot should 
move. It is recommended to pass a normalized vector, though the model normalizes internally. Speed is controlled by `target_vel` and `mode`, not by the magnitude of this vector. + +- The planner uses the `(x, y)` components (horizontal plane) for computing the target root trajectory via a critically-damped spring model. +- When the magnitude is near zero (`< 1e-5`), the model falls back to using the `facing_direction` with a small scaling factor for in-place turning. + + +### `facing_direction` + +| Property | Value | +|----------|-------| +| **Shape** | `[1, 3]` | +| **Dtype** | `float32` | +| **Description** | Desired facing (heading) direction in the MuJoCo world frame | + +A 3D direction vector `(x, y, z)` indicating which direction the robot's torso should face. The target heading angle is computed as `atan2(y, x)` from this vector. Like `movement_direction`, this does not need to be normalized. + +This is independent of `movement_direction` — the robot can walk in one direction while facing another (e.g., strafing). + + +### `height` + +| Property | Value | +|----------|-------| +| **Shape** | `[1]` | +| **Dtype** | `float32` | +| **Description** | Desired root height for height-aware behaviors | + +Controls the target pelvis height for modes that support variable height (e.g., `squat`, `kneelTwoLeg`, `kneelOneLeg`, `lyingFacedown`). When a non-negative value is provided, the model searches the reference clip's keyframes and selects the one whose root height is closest to the requested value, using it as the target pose for motion generation. + +| Value | Behavior | +|-------|----------| +| `< 0.0` | Height control disabled; use the randomly-selected keyframe from the reference clip | +| `>= 0.0` | Find the closest height keyframe in the reference clip and use it as the target pose (meters) | + +
+ +## Advanced Inputs + +These inputs are managed internally by the C++ deployment stack and **should not be modified** under normal operation. They are documented here for completeness and for advanced users who need to build custom integrations. + +### `random_seed` + +| Property | Value | +|----------|-------| +| **Shape** | `[1]` | +| **Dtype** | `int64` | +| **Description** | Seed for controlling network randomness | + + +### `has_specific_target` + +| Property | Value | +|----------|-------| +| **Shape** | `[1, 1]` | +| **Dtype** | `int64` | +| **Description** | Flag indicating whether specific waypoint targets are provided | + +| Value | Behavior | +|-------|----------| +| `0` | Ignore `specific_target_positions` and `specific_target_headings`; use `movement_direction` / `facing_direction` | +| `1` | Use the provided specific target positions and headings as waypoint constraints | + +When enabled, the spring model's target root position and heading are overridden by the values in `specific_target_positions` and `specific_target_headings`. + + +### `specific_target_positions` + +| Property | Value | +|----------|-------| +| **Shape** | `[1, 4, 3]` | +| **Dtype** | `float32` | +| **Description** | 4 waypoint positions in MuJoCo world coordinates | + +Each waypoint is a 3D position `(x, y, z)` in the Z-up world frame. The 4 waypoints correspond to 4 frames (one token's worth) of target root positions. Only used when `has_specific_target = 1`. + + +### `specific_target_headings` + +| Property | Value | +|----------|-------| +| **Shape** | `[1, 4]` | +| **Dtype** | `float32` | +| **Description** | 4 waypoint heading angles in radians | + +Target heading (yaw) angles for each of the 4 waypoint frames. These are absolute angles in the Z-up world frame, measured as rotation around the Z-axis. Only used when `has_specific_target = 1`. The last waypoint's heading (`[:, -1]`) is used as the primary target heading for the spring model. 
+ + +### `allowed_pred_num_tokens` + +| Property | Value | +|----------|-------| +| **Shape** | `[1, K]` where `K = max_tokens - min_tokens + 1` | +| **Dtype** | `int64` | +| **Description** | Binary mask controlling the allowed prediction horizon | + +A binary mask where each element corresponds to a possible number of predicted tokens. Index `i` maps to `min_tokens + i` tokens. A value of `1` means that prediction length is allowed; `0` means it is disallowed. + +Since each token represents 4 frames, the prediction horizon in frames is `num_tokens * 4`. In our default planner we have `min_tokens = 6` and `max_tokens = 16`: + +| Index | Tokens | Frames | +|-------|--------|--------| +| 0 | 6 | 24 | +| 1 | 7 | 28 | +| 2 | 8 | 32 | +| 3 | 9 | 36 | +| 4 | 10 | 40 | +| 5 | 11 | 44 | +| 6 | 12 | 48 | +| 7 | 13 | 52 | +| 8 | 14 | 56 | +| 9 | 15 | 60 | +| 10 | 16 | 64 | + +--- + +## Output Tensors + +### `mujoco_qpos` + +| Property | Value | +|----------|-------| +| **Shape** | `[1, N, 36]` | +| **Dtype** | `float32` | +| **Description** | Predicted motion sequence as MuJoCo `qpos` frames | + +The primary output: a sequence of whole-body pose frames in the same 36-dimensional MuJoCo `qpos` format as the input (see {ref}`context_mujoco_qpos <context_mujoco_qpos>` for the dimension layout). + +```{admonition} Important: Use num_pred_frames to Truncate +:class: warning +The output tensor `mujoco_qpos` is **not truncated** — it contains the full padded buffer. Only the first `num_pred_frames` frames are valid predictions. When consuming this output, always slice: +``` + +```python +valid_qpos = mujoco_qpos[:, :num_pred_frames, :] +``` + +The poses are in the **global MuJoCo world frame** (not canonicalized). The model internally handles canonicalization, inference, and coordinate conversion, then transforms the output back to the original world frame. The first 4 predicted frames are blended with the input context for smooth transitions. 
+ +The root quaternion in the output uses `(w, x, y, z)` ordering (MuJoCo convention). + + +### `num_pred_frames` + +| Property | Value | +|----------|-------| +| **Shape** | scalar | +| **Dtype** | `int64` | +| **Description** | Number of valid predicted frames in the `mujoco_qpos` output | + +This value equals `num_pred_tokens * 4`, where `num_pred_tokens` is the number of motion tokens the model decided to generate (constrained by `allowed_pred_num_tokens`). Use this value to slice the `mujoco_qpos` output. + +--- + +## Internal Pipeline + +1. **Canonicalization** — The input qpos is transformed to a body-relative frame by removing the first frame's heading rotation and horizontal position. This helps the model generalize across different starting orientations and positions. + +2. **Spring Model** — A critically-damped spring model generates smooth target root trajectories and heading angles from the high-level commands, using mode-dependent average velocities from the training clips. + +3. **Target Pose Selection** — Based on the `mode` and `random_seed`, a target pose is fetched from the pre-loaded clip library and aligned (rotated/translated) to match the spring model's predicted target position and heading. + +4. **Motion Inference** — The core motion model fills in the motion between the context (current state) and target (desired future state), producing a natural transition. + +5. **Post-processing** — The output is converted back to MuJoCo qpos in the original world frame, and the first 4 frames are blended with the input context for smooth transitions. + +--- + +## Deployment Integration + +This section describes how the C++ deployment stack uses the planner at runtime. Understanding this is useful for building custom integrations or modifying the replan behavior. + +### Threading Model + +The planner runs on a **dedicated thread at 10 Hz** (`planner_dt = 0.1s`), separate from the control loop (50 Hz) and input thread (100 Hz). The planner thread: + +1. 
Reads the latest `MovementState` from a thread-safe buffer (written by the input interface). +2. Decides whether a replan is needed. +3. If so, calls `UpdatePlanning()` which runs TensorRT inference. +4. Stores the result in a shared buffer that the control thread picks up on its next tick. + +### Initialization + +When the planner is first enabled (e.g., pressing **ENTER** on the keyboard interface), the planner thread: + +1. Reads the robot's current base quaternion and joint positions from the latest `LowState`. +2. Calls `Initialize()`, which: + - Sets up a 4-frame context at the default standing height with zero-yaw orientation. + - Runs an initial inference with `IDLE` mode and no movement. + - Resamples the 30 Hz output to 50 Hz. +3. The control thread detects the new planner motion and switches `current_motion_` to the planner output. + +### Context Construction + +The planner requires a 4-frame context (`context_mujoco_qpos` of shape `[1, 4, 36]`). During operation, this context is sampled from the **current planner motion** (not the robot state): + +- The context starts at `gen_frame = current_frame + motion_look_ahead_steps` (default look-ahead = 2 frames at 50 Hz). +- 4 frames are sampled at 30 Hz intervals from this starting point. +- Joint positions, body positions, and quaternions are linearly interpolated (quaternions via slerp) between 50 Hz motion frames to produce the 30 Hz context samples. + +### Replan Logic + +Not every planner tick triggers a replan. The decision follows this priority: + +**1. Always replan when** (regardless of static/non-static mode): +- Locomotion mode changed +- Facing direction changed +- Height changed + +**2. 
For non-static modes only**, also replan when any of the following is true: +- Movement speed changed +- Movement direction changed +- Periodic replan timer expired **and** movement speed is non-zero + +Static modes (Idle, Squat, Kneel, Lying, Idle Boxing) **never** trigger replans from the second category — they only replan on mode/facing/height changes from the first category. + +**Replan intervals** (periodic timer) vary by locomotion type to balance responsiveness and computational cost: + +| Locomotion Type | Replan Interval | +|----------------|-----------------| +| Running | 0.1 s (every planner tick) | +| Crawling | 0.2 s | +| Boxing (punches, hooks) | 1.0 s | +| All others (walk, squat, styled, etc.) | 1.0 s | + +The periodic timer only triggers a replan if the current movement speed is non-zero — a stationary robot in a non-static mode (e.g., Walk mode with speed 0) will not replan on the timer. + +### Output Resampling (30 Hz → 50 Hz) + +The planner model outputs frames at **30 Hz**. The deployment stack resamples them to **50 Hz** (the control loop rate) using linear interpolation: + +- For each 50 Hz frame, compute the corresponding fractional 30 Hz frame index. +- Linearly interpolate joint positions and body positions between the two nearest 30 Hz frames. +- Slerp-interpolate body quaternions. +- Compute joint velocities by finite differencing the resampled positions (`(pos[t+1] - pos[t]) * 50`). + +The resampled motion is stored in `planner_motion_50hz_` and has `num_pred_frames * 50/30` frames (rounded down). + +### Animation Blending + +When a new planner output arrives while the previous one is still playing, the control thread **blends** the old and new animations over an 8-frame cross-fade: + +1. The old animation is rebased so `current_frame` maps to frame 0. +2. The new animation is aligned to start at `gen_frame - current_frame` in the rebased timeline. +3. 
Over 8 frames starting from the blend point, a linearly increasing weight `w_new` (0 → 1) is applied: + - Joint positions/velocities: `w_old * old + w_new * new` + - Body positions: `w_old * old + w_new * new` + - Body quaternions: `slerp(old, new, w_new)` +4. After the blend region, the new animation takes over completely. +5. `current_frame` is reset to 0 on the blended result. + +This ensures smooth transitions between successive planner outputs without visible discontinuities. + +### TensorRT Acceleration + +The planner runs via **TensorRT** with CUDA graph capture for low-latency inference: + +1. At startup, the ONNX model is converted to a TensorRT engine (cached on disk). +2. A **CUDA graph** is captured during initialization — this records the entire inference pass (input copy → kernel launches → output copy) as a single replayable graph. +3. On each replan, inputs are copied to GPU via pinned memory (`TPinnedVector`), the CUDA graph is launched, and outputs are copied back. +4. FP16 precision is supported via `--planner-precision 16` (default is FP32). + +### Planner Model Versions + +The deployment stack supports multiple planner model versions, auto-detected from the model filename: + +| Version | Inputs | Modes | Description | +|---------|--------|-------|-------------| +| V0 | 6 | 4 (Idle, Slow Walk, Walk, Run) | Basic locomotion only | +| V1 | 11 | 20 | Adds squat/kneel/boxing/styled walks + height control + waypoint targets | +| V2 | 11 | 27 | All V1 modes + additional styled walking modes | + +The version is determined by the presence of `V0`, `V1`, or `V2` in the planner model filename. 
Version determines: +- The number of input tensors (6 vs 11) +- The valid range of `mode` values +- Whether `height`, `has_specific_target`, `specific_target_positions`, `specific_target_headings`, and `allowed_pred_num_tokens` inputs are used + +--- + +## Model Properties + +The exported ONNX model has the following properties: + +- **ONNX opset version**: 17 +- **Batch size**: 1 (fixed) + +The model is distributed as a single `.onnx` file, along with a `.pt` file containing reference input/output tensors that can be used for validation. + +```{admonition} Coming Soon +:class: note +The training code, export tooling, and a full technical report will be released soon. Stay tuned for updates. +``` + +--- + +## Usage Example + +```python +import onnxruntime as ort +import numpy as np + +# Load the ONNX model +session = ort.InferenceSession("kinematic_planner.onnx") + +# Primary inputs +inputs = { + "context_mujoco_qpos": current_qpos_buffer.astype(np.float32), # [1, 4, 36] + "target_vel": np.array([-1.0], dtype=np.float32), # -1.0 = use mode default + "mode": np.array([2], dtype=np.int64), # 2 = walk + "movement_direction": np.array([[1.0, 0.0, 0.0]], dtype=np.float32), # forward + "facing_direction": np.array([[1.0, 0.0, 0.0]], dtype=np.float32), # face forward + "height": np.array([-1.0], dtype=np.float32), # -1.0 = disabled + + # Advanced inputs (typically managed by the C++ stack) + "random_seed": np.array([1234], dtype=np.int64), + "has_specific_target": np.array([[0]], dtype=np.int64), + "specific_target_positions": np.zeros([1, 4, 3], dtype=np.float32), + "specific_target_headings": np.zeros([1, 4], dtype=np.float32), + "allowed_pred_num_tokens": np.ones([1, 11], dtype=np.int64), # K = 11 for default model +} + +# Run inference +mujoco_qpos, num_pred_frames = session.run(None, inputs) + +# Extract valid frames only +num_frames = int(num_pred_frames) +predicted_motion = mujoco_qpos[0, :num_frames, :] # [num_frames, 36] + +# Each row: [x, y, z, qw, qx, qy, qz, 
29 joint angles in radians] +for frame in predicted_motion: + root_pos = frame[:3] + root_quat = frame[3:7] # (w, x, y, z) + joint_angles = frame[7:36] # 29 DOF positions +``` diff --git a/docs/source/resources/citations.md b/docs/source/resources/citations.md new file mode 100644 index 0000000..b84152f --- /dev/null +++ b/docs/source/resources/citations.md @@ -0,0 +1,12 @@ +# Citations + +If you use GEAR-SONIC in your research, please cite: + +```bibtex +@article{luo2025sonic, + title={SONIC: Supersizing Motion Tracking for Natural Humanoid Whole-Body Control}, + author={Luo, Zhengyi and Yuan, Ye and Wang, Tingwu and Li, Chenran and Chen, Sirui and Casta\~neda, Fernando and Cao, Zi-Ang and Li, Jiefeng and Minor, David and Ben, Qingwei and Da, Xingye and Ding, Runyu and Hogg, Cyrus and Song, Lina and Lim, Edy and Jeong, Eugene and He, Tairan and Xue, Haoru and Xiao, Wenli and Wang, Zi and Yuen, Simon and Kautz, Jan and Chang, Yan and Iqbal, Umar and Fan, Linxi and Zhu, Yuke}, + journal={arXiv preprint arXiv:2511.07820}, + year={2025} +} +``` diff --git a/docs/source/resources/contributing.md b/docs/source/resources/contributing.md new file mode 100644 index 0000000..5da54b5 --- /dev/null +++ b/docs/source/resources/contributing.md @@ -0,0 +1,3 @@ +# Contributing + +We welcome contributions! Coming soon... diff --git a/docs/source/resources/license.md b/docs/source/resources/license.md new file mode 100644 index 0000000..5339979 --- /dev/null +++ b/docs/source/resources/license.md @@ -0,0 +1,17 @@ +# License + +This project uses dual licensing. + +## Source Code + +The source code is licensed under the **Apache License 2.0**. + +## Model Weights + +The trained model checkpoints and weights are licensed under the **NVIDIA Open Model License**. + +See the complete [LICENSE](https://github.com/NVlabs/GR00T-WholeBodyControl/blob/main/LICENSE) file for the full text of both licenses. 
+ +The NVIDIA Open Model License permits commercial use and allows you to create derivative models. When distributing models, you must include attribution and comply with [NVIDIA Trustworthy AI Terms](https://www.nvidia.com/en-us/agreements/trustworthy-ai/terms/). + +For licensing questions, contact [gear-wbc@nvidia.com](mailto:gear-wbc@nvidia.com). diff --git a/docs/source/resources/support.md b/docs/source/resources/support.md new file mode 100644 index 0000000..0135ea3 --- /dev/null +++ b/docs/source/resources/support.md @@ -0,0 +1,3 @@ +# Support + +For questions and issues, please contact the GEAR WBC team at [gear-wbc@nvidia.com](mailto:gear-wbc@nvidia.com). diff --git a/docs/source/tutorials/gamepad.md b/docs/source/tutorials/gamepad.md new file mode 100644 index 0000000..e0ee1bb --- /dev/null +++ b/docs/source/tutorials/gamepad.md @@ -0,0 +1,117 @@ +# Motion Tracking and Kinematic Planner with Gamepad Controls + +Control the robot using a Unitree wireless gamepad for reference motion playback and planner-based locomotion (using `--input-type gamepad`). + +```{admonition} Prerequisites +:class: note +Complete the [Installation Guide](../getting_started/installation_deploy) and build the project before proceeding. +``` + +```{warning} +The gamepad interface requires a physical Unitree wireless gamepad connected to the robot. It is **not available in sim2sim** — use the [keyboard interface](keyboard.md) to test in simulation first. +``` + +```{admonition} Emergency Stop +:class: danger +Press **Select** at any time to immediately stop control and exit. Always keep a hand ready to press **Select**. +``` + +## Launch + +```bash +# From gear_sonic_deploy/ +bash deploy.sh real --input-type gamepad +``` + +## Step-by-Step: Normal Mode (Reference Motion Tracking) + +Normal Mode plays back pre-loaded reference motions. This is the default mode when the program starts. + +1. Press **Start** to start the control system. +2. 
Press **A** to play the current reference motion — the robot executes it to completion.
+3. Press **R1** to switch to the next motion sequence, or **L1** for the previous one.
+4. Press **A** again to play the new motion.
+5. To stop mid-motion and return to the first frame, press **B** — the robot pauses without terminating the policy.
+6. Use **D-pad Left / Right** to nudge the heading (±0.1 rad per press).
+7. Press **X** or **Y** to reinitialize the base quaternion and reset the heading to zero, i.e. robot will think the current facing is the facing at the first frame of the reference.
+8. When done, press **Select** to stop control and exit.
+
+## Step-by-Step: Planner Mode (Real-time Motion Generation)
+
+Planner Mode gives you analog stick control — steer with the left stick, aim the facing direction with the right stick, and cycle through locomotion modes.
+
+1. From Normal Mode, press **F1** to switch to Planner Mode. The terminal will print `Planner enabled`.
+2. The robot starts in Slow Walk mode (mode 1). Push the **left stick forward** to walk — movement direction is computed from the stick angle relative to the current facing direction.
+3. Steer the **right stick left / right** to smoothly rotate the facing direction (continuous, ±0.02 rad per frame).
+4. Press **R1** to cycle to the next movement mode (Idle → Slow Walk → Walk → Run → Squat → Kneel Two Legs → Kneel → Idle → …). Press **L1** to cycle backward.
+5. Hold **R2** to increase speed (for standing modes 1–3) or height (for squat modes 4–6). Hold **L2** to decrease. Speed/height changes by ±0.02 per frame while held.
+6. When the left stick is in the dead zone, standing modes automatically switch to Idle; squat/kneel modes hold their pose with speed 0.
+7. To pause, press **B** — the robot resets to Idle immediately.
+8. Press **F1** again to return to Normal Mode, or **Select** to stop and exit.
+ +## Control Reference + +### System Controls (Both Modes) + +| Button | Action | +|--------|--------| +| **Start** | Start control system | +| **Select** | Stop control and exit (emergency stop) | +| **F1** | Toggle between Normal / Planner modes | +| **X** or **Y** | Reinitialize base quaternion and reset heading | +| **D-pad Left / Right** | Adjust delta heading (±0.1 rad) | + +### Normal Mode Buttons + +| Button | Action | +|--------|--------| +| **A** | Play current motion to completion | +| **B** | Restart current motion from beginning (pause at frame 0) | +| **L1** / **R1** | Previous / Next motion sequence | + +### Planner Mode Buttons + +**Movement:** + +| Input | Action | +|-------|--------| +| **Left Stick** | Movement direction (computed from stick angle + facing direction) | +| **Right Stick** | Facing direction (continuous rotation, ±0.02 rad/frame) | + +**Mode & Speed:** + +| Button | Action | +|--------|--------| +| **L1** / **R1** | Previous / Next movement mode (cycles 0–6) | +| **L2** (hold) | Decrease speed (modes 1–3) or height (modes 4–6), ±0.02/frame | +| **R2** (hold) | Increase speed (modes 1–3) or height (modes 4–6), ±0.02/frame | +| **A** | Play / resume motion | + +**Emergency:** + +| Button | Action | +|--------|--------| +| **B** | Pause (reset to Idle) | +| **Select** | Stop control and exit | + +## Movement Modes + +The gamepad cycles through 7 modes with **L1** / **R1**. Use **L2** / **R2** (hold) to adjust speed or height. + +| ID | Mode | L2/R2 Adjusts | Range | +|----|------|---------------|-------| +| 0 | Idle | — | — | +| 1 | Slow Walk | speed | 0.2–0.8 m/s | +| 2 | Walk | speed | 0.8–1.5 m/s | +| 3 | Run | speed | 1.5–3.0 m/s | +| 4 | Squat | height | 0.1–0.8 m | +| 5 | Kneel (two legs) | — | — | +| 6 | Kneel | — | — | + +```{tip} +For lateral (side-stepping) movement, we recommend keeping the target velocity at around **0.4 m/s**. 
Higher velocities during strafing can cause the robot's feet to collide due to the cross-legged foot placement required for lateral steps. +``` + +```{note} +When entering Squat (mode 4) from an adjacent mode, the height automatically initializes to 0.8 m. +``` diff --git a/docs/source/tutorials/keyboard.md b/docs/source/tutorials/keyboard.md new file mode 100644 index 0000000..152de13 --- /dev/null +++ b/docs/source/tutorials/keyboard.md @@ -0,0 +1,252 @@ +# Motion Tracking and Kinematic Planner with Keyboard Controls + +
+ +
In-the-wild navigation demo using the kinematic planner with keyboard controls.
+
+ +```{video} ../_static/Keyboard_Guidance.mp4 +:width: 100% +``` +*Video: Keyboard control walkthrough — starting the control system, playing reference motions, and using planner mode for real-time locomotion.* + +Control the robot using keyboard commands for reference motion playback and planner-based locomotion (using `--input-type keyboard`). + +```{admonition} Prerequisites +:class: note +Complete the [Quick Start](../getting_started/quickstart.md) to have the sim2sim loop running. +``` + +```{admonition} Emergency Stop +:class: danger +Press **`O`** at any time to immediately stop control and exit. Always keep a hand near the keyboard ready to press **`O`**. +``` + +## Launch + +**Sim2Sim (MuJoCo):** + +```bash +# Terminal 1 — MuJoCo simulator (from repo root) +source .venv_sim/bin/activate +python gear_sonic/scripts/run_sim_loop.py + +# Terminal 2 — C++ deployment (from gear_sonic_deploy/) +bash deploy.sh sim --input-type keyboard +``` + +**Real Robot:** + +```bash +# From gear_sonic_deploy/ +bash deploy.sh real --input-type keyboard +``` + +## Step-by-Step: Normal Mode (Reference Motion Tracking) + +Normal Mode plays back pre-loaded reference motions. This is the default mode when the program starts. + +1. In Terminal 2, press **`]`** to start the control system. +2. In the MuJoCo window, press **`9`** to drop the robot to the ground. +3. Go back to Terminal 2, press **`T`** to play the current reference motion — the robot executes it to completion. +4. Press **`N`** to switch to the next motion sequence, or **`P`** for the previous one. +5. Press **`T`** again to play the new motion. +6. To replay the same motion, press **`T`** again after it finishes. To stop mid-motion and return to the first frame, press **`R`** — the robot pauses at the first frame without terminating the policy. +7. Use **`Q`** / **`E`** to nudge the heading left or right (±π/12 rad per press). +8. Press **`I`** to reinitialize the base quaternion and reset the heading to zero, i.e. 
robot will think the current facing is the facing at the first frame of the reference. +9. When done, press **`O`** to stop control and exit. + +## Step-by-Step: Planner Mode (Real-time Motion Generation) + +Planner Mode lets you control the robot in real time — choose a locomotion style, steer with WASD, and adjust speed and height on the fly. + +1. From Normal Mode, press **`ENTER`** to switch to Planner Mode. The terminal will print `Planner enabled`. +2. The robot starts in the **Locomotion** motion set. Press **`1`** for slow walk, **`2`** for walk, or **`3`** for run. +3. Press **`W`** to walk forward. The robot uses a momentum system — holding a direction key sets momentum to full; releasing it lets the robot gradually decelerate and return to idle. +4. Steer with **`A`** / **`D`** (adjust heading and moving direction together) or turn in place with **`Q`** / **`E`** (±π/6 rad per press, only facing direction). +5. Press **`,`** / **`.`** to strafe left / right. +6. Press **`S`** to move backward. +7. Adjust speed with **`9`** (decrease) / **`0`** (increase). Speed ranges depend on the current mode (see tables below). +8. Press **`N`** to cycle to the next motion set (Locomotion → Squat → Boxing → Styled Walking → …). Use **`P`** to go back. +9. Within a motion set, press **`1`**–**`8`** to pick a specific mode (see the Motion Sets section below). +10. For squat-type modes, adjust body height with **`-`** (lower) / **`=`** (higher), clamped to 0.2–0.8 m. +11. If you need an immediate halt, press **`R`**, **`` ` ``**, or **`~`** — this resets movement momentum to zero instantly. +12. Press **`ENTER`** again to return to Normal Mode, or **`O`** to stop and exit. 
+ +## Control Reference + +### System Controls (Both Modes) + +| Key | Action | +|-----|--------| +| **]** | Start control system | +| **O** | Stop control and exit (emergency stop) | +| **ENTER** | Toggle between Normal / Planner modes | +| **I** | Reinitialize base quaternion and reset heading | +| **Z** | Toggle encoder mode (between mode 0 and mode 1, if encoder loaded) | + +### Normal Mode Keys + +| Key | Action | +|-----|--------| +| **T** | Play current motion to completion | +| **R** | Restart current motion from beginning (pause at frame 0) | +| **P** / **N** | Previous / Next motion sequence | +| **Q** / **E** | Adjust delta heading (at policy level) left / right (±π/12 rad) | + +### Planner Mode Keys + +**Movement:** + +| Key | Action | +|-----|--------| +| **W** / **S** | Move forward / backward | +| **A** / **D** | Adjust heading slightly and move forward (left / right) | +| **,** / **.** | Strafe left / right | + +**Heading:** + +| Key | Action | +|-----|--------| +| **Q** / **E** | Adjust facing direction (at planner level) left / right (±π/6 rad) | +| **J** / **L** | Adjust delta heading (at policy level) left / right (±π/12 rad) | + +**Mode & Speed:** + +| Key | Action | +|-----|--------| +| **N** / **P** | Next / Previous motion set | +| **1**–**8** | Select mode within the current set | +| **9** / **0** | Decrease / Increase movement speed | +| **-** / **=** | Decrease / Increase height (non-standing sets, 0.2–0.8 m) | +| **T** | Play motion | + +**Emergency:** + +| Key | Action | +|-----|--------| +| **R** / **`** / **~** | Emergency stop (immediate momentum reset) | + +## Motion Sets + +A **motion set** is a group of related movement styles (e.g., locomotion, gestures, or crouching). Selecting a mode within a set makes the robot behave in that style. Each set contains up to eight selectable modes. + +Cycle through motion sets with **`N`** (next) / **`P`** (previous). Within each set, press **`1`**–**`8`** to select a mode. 
For more details on the underlying planner model, mode indices, and input/output specifications, see the [Kinematic Planner ONNX Model Reference](../references/planner_onnx.md).
+
+### Set 0 — Locomotion (Standing)
+
+| Key | Mode | Speed Range |
+|-----|------|-------------|
+| **1** | Slow Walk | 0.2–0.8 m/s |
+| **2** | Walk | 0.8–1.5 m/s |
+| **3** | Run | 1.5–3.0 m/s |
+| **4** | Happy | — |
+| **5** | Stealth | — |
+| **6** | Injured | — |
+
+```{tip}
+For lateral (side-stepping) movement using **`,`** / **`.`**, we recommend keeping the target velocity at around **0.4 m/s**. Higher velocities during strafing can cause the robot's feet to collide due to the cross-legged foot placement required for lateral steps.
+```
+
+
+ +
Happy styled walking.
+
+
+ +
Stealth styled walking.
+
+
+ +
Injured styled walking.
+
+
+ +
Running locomotion mode.
+
+
+ +### Set 1 — Squat / Ground + +Height adjustable with **`-`** / **`=`** (0.2–0.8 m). Initial height defaults to 0.8 m when entering this set. + +| Key | Mode | Speed Range | +|-----|------|-------------| +| **1** | Squat | static | +| **2** | Kneel (Two Legs) | static | +| **3** | Kneel (One Leg) | static | +| **4** | Hand Crawling | 0.4–1.5 m/s | +| **5** | Elbow Crawling | 0.7–1.5 m/s | + +
+
+ +
Kneeling mode with variable height control.
+
+
+ +
Hand crawling locomotion.
+
+
+ +
Elbow crawling locomotion.
+
+
+ +### Set 2 — Boxing + +| Key | Mode | Speed Range | +|-----|------|-------------| +| **1** | Idle Boxing | static | +| **2** | Walk Boxing | 0.7–1.5 m/s | +| **3** | Left Jab | 0.7–1.5 m/s | +| **4** | Right Jab | 0.7–1.5 m/s | +| **5** | Random Punches | 0.7–1.5 m/s | +| **6** | Left Hook | 0.7–1.5 m/s | +| **7** | Right Hook | 0.7–1.5 m/s | + +
+ +
Boxing mode demo.
+
+ +### Set 3 — Additional Styled Walking + +| Key | Mode | +|-----|------| +| **1** | Careful | +| **2** | Object Carrying | +| **3** | Crouch | +| **4** | Happy Dance | +| **5** | Zombie | +| **6** | Point | +| **7** | Scared | + +## Movement Momentum System + +The planner usage in keyboard mode uses a momentum-based movement system: +- Pressing a direction key (**W/S/A/D/,/.**) sets momentum to **1.0** (full speed). +- Each frame without a direction key press, momentum decays multiplicatively (×0.999). +- When momentum drops below **0.1**, the robot transitions to idle (for the Locomotion set) or holds the current static pose (for Squat and Boxing sets). +- Emergency stop (**R/`/~**) instantly resets momentum to zero. + +This means you don't need to hold a key down — a single press starts movement, and the robot coasts to a stop naturally. diff --git a/docs/source/tutorials/manager.md b/docs/source/tutorials/manager.md new file mode 100644 index 0000000..095d960 --- /dev/null +++ b/docs/source/tutorials/manager.md @@ -0,0 +1,117 @@ +# Interface Manager (All-In-One) + +Dynamically switch between keyboard, gamepad, and ZMQ input interfaces at runtime using hotkeys (`--input-type manager`). The manager owns all interfaces simultaneously and delegates to the currently active one. You can switch at any time without restarting the program. + +```{admonition} Prerequisites +:class: note +Complete the [Quick Start](../getting_started/quickstart.md) to have the sim2sim loop running. +``` + +```{admonition} Emergency Stop +:class: danger +Press **`O`** at any time to immediately stop control and exit — this works regardless of which interface is active, including when the gamepad is selected. Always keep a hand near the keyboard ready to press **`O`**. 
+``` + +## Launch + +**Sim2Sim (MuJoCo):** + +```bash +# Terminal 1 — MuJoCo simulator (from repo root) +source .venv_sim/bin/activate +python gear_sonic/scripts/run_sim_loop.py + +# Terminal 2 — C++ deployment (from gear_sonic_deploy/) +bash deploy.sh sim --input-type manager +``` + +**Real Robot:** + +```bash +# From gear_sonic_deploy/ +bash deploy.sh real --input-type manager +``` + +If you plan to use the ZMQ interface, add ZMQ flags: + +```bash +bash deploy.sh sim --input-type manager \ + --zmq-host \ + --zmq-port 5556 \ + --zmq-topic pose +``` + +## Step-by-Step + +1. The manager starts in **Keyboard** mode by default. +2. Press **`]`** to start the control system (keyboard mode). Use the robot as described in the [Keyboard tutorial](keyboard.md). +3. To switch to gamepad, press **`Shift+2`** (types `@`). The terminal prints `Switched to: GAMEPAD (safety reset triggered)`. The robot returns to reference motion at frame 0 and the planner is disabled. +4. Use the gamepad controls as described in the [Gamepad tutorial](gamepad.md). +5. To switch to ZMQ streaming, press **`Shift+3`** (types `#`). A safety reset is triggered and the terminal prints `Switched to: ZMQ`. Use the ZMQ controls as described in the [ZMQ tutorial](zmq.md). +6. To switch back to keyboard at any time, press **`Shift+1`** (types `!`). +7. Press **`O`** to stop control and exit from any interface. + +## Switching Interfaces + +| Hotkey | Interface | Notes | +|--------|-----------|-------| +| **Shift+1** (`!`) | Keyboard | Default. Full keyboard controls (Normal + Planner modes). See [Keyboard tutorial](keyboard.md). | +| **Shift+2** (`@`) | Gamepad | Unitree wireless gamepad. See [Gamepad tutorial](gamepad.md). | +| **Shift+3** (`#`) | ZMQ | ZMQ streaming (requires `--zmq-host` etc.). See [ZMQ tutorial](zmq.md). | + +```{tip} +A **ROS2** interface is also available via **Shift+4** (`$`) when built with ROS2 support. It requires the planner to be loaded and falls back to Keyboard otherwise. 
The ROS2 interface is provided as a reference implementation for building custom ROS2-based control pipelines and may not receive the same level of updates as the other interfaces. +``` + +### What Happens When You Switch + +Each switch triggers a **safety reset** on **all** managed interfaces (not just the one you're switching to). This: + +- Disables the planner and returns to reference motion at frame 0 +- Resets heading and movement states +- Disables ZMQ streaming (if it was active) +- Prevents control discontinuities from stale state in the previous interface + +The safety reset ensures you always start from a clean state after switching, regardless of what the previous interface was doing. + +```{note} +All four interfaces are created at startup and stay alive across switches. Their internal state (e.g., gamepad mode, ZMQ connection) is preserved — only the planner/motion state is reset for safety. This means you don't need to re-establish the ZMQ connection or re-pair the gamepad when switching back. +``` + +## Global Controls + +These controls work at the manager level, **regardless of which interface is active**. They are intercepted by the manager before being passed to the active interface. + +### Emergency Stop + +| Key | Action | +|-----|--------| +| **O** / **o** | Immediate emergency stop — works even when gamepad or ZMQ is the active interface | + +### Compliance Controls + +These adjust hand compliance and grasp parameters globally. They are especially useful during teleoperation (ZMQ / ROS2) where the robot's hands interact with objects. + +| Key | Action | +|-----|--------| +| **G** / **H** | Increase / Decrease left hand compliance by 0.1 | +| **B** / **V** | Increase / Decrease right hand compliance by 0.1 | +| **X** / **C** | Increase / Decrease max hand close ratio by 0.1 | + +```{note} +Compliance controls are global — they affect the robot's hands no matter which interface is active. 
This lets you adjust compliance from the keyboard even while the gamepad or ZMQ is controlling the robot's movement. +``` + +## Interface-Specific Controls + +Once an interface is active, all its normal controls work as documented in its own tutorial: + +- **Keyboard** (`!`): All keys from the [Keyboard tutorial](keyboard.md) — T/R/P/N for motions, ENTER for planner, WASD for movement, etc. +- **Gamepad** (`@`): All buttons from the [Gamepad tutorial](gamepad.md) — Start, A/B, L1/R1, analog sticks, etc. +- **ZMQ** (`#`): ENTER to toggle streaming, T/P/N/R for reference motions when not streaming. See the [ZMQ tutorial](zmq.md). + +The only exceptions are: +- **`O`** is always intercepted by the manager for emergency stop (not passed to the active interface). +- **`G/H/B/V/X/C`** are always intercepted for compliance control. +- **`!/@ /#/$`** are always intercepted for interface switching. + diff --git a/docs/source/tutorials/vr_wholebody_teleop.md b/docs/source/tutorials/vr_wholebody_teleop.md new file mode 100644 index 0000000..fbd6b21 --- /dev/null +++ b/docs/source/tutorials/vr_wholebody_teleop.md @@ -0,0 +1,303 @@ +# PICO VR Whole-body Teleop + +Full whole-body teleoperation using PICO VR headset and controllers. To teleop, use the option `--input-type zmq_manager` during deployment. The `zmq_manager` input type switches between a **planner mode** (locomotion commands via ZMQ) and a **streamed motion mode** (full-body SMPL poses from PICO). + +```{admonition} Safety Warning +:class: danger +Whole-body teleoperation involves fast, agile motions. **Always** maintain a clear safety zone and keep a safety operator at the keyboard ready to trigger an emergency stop (**`O`** in the C++ terminal, or **A+B+X+Y** on the PICO controllers). + +You **must wear tight-fitting pants or leggings** to guarantee line-of-sight for the foot trackers — loose or baggy clothing can make tracking fail unpredictably and may result in dangerous motion. 
+``` + +```{video} ../_static/teleop/teleop_session_overview.mp4 +:width: 100% +``` +*Video: End-to-end teleoperation walkthrough — PICO calibration, policy engagement, and the robot balancing independently. See the [Teleoperation Guide](../user_guide/teleoperation.md) for detailed best practices.* + +## Prerequisites + +1. **Completed the [Quick Start](../getting_started/quickstart.md)** — you can run the sim2sim loop. +2. **Completed the [VR Teleop Setup](../getting_started/vr_teleop_setup.md)** — PICO hardware is installed, calibrated, connected, and `.venv_teleop` is ready. + +--- + +## Step-by-Step: Teleop in SIM + +Run **three terminals** to teleoperate the simulated robot. + +### Terminal 1 — Launch virtual robot in MuJoCo Simulator + +From the **repo root**: + +```bash +# bash install_scripts/install_pico.sh + +source .venv_teleop/bin/activate +python gear_sonic/scripts/run_sim_loop.py +``` + +### Terminal 2 — C++ Deployment + +From `gear_sonic_deploy/`: + +```bash +cd gear_sonic_deploy +source scripts/setup_env.sh +./deploy.sh sim --input-type zmq_manager +# Wait until you see "Init done" +``` + +```{note} +The `--zmq-host` flag defaults to `localhost`, which is correct when both C++ deployment scripts and teleop scripts (Terminal 3) run on the same machine. If the teleop script runs on a different machine, pass `--zmq-host `. +``` + +### Terminal 3 — PICO Teleop Streamer + +From the **repo root**: + +```bash +source .venv_teleop/bin/activate + +# With full visualization (recommended for first run): +python gear_sonic/scripts/pico_manager_thread_server.py --manager \ + --vis_vr3pt --vis_smpl + +# Without visualization (for headless / onboard practice): +# python gear_sonic/scripts/pico_manager_thread_server.py --manager +``` + +When you turn on the visualization, wait for a window to pop up showing a Unitree G1 mesh with all joints at the default angles. 
If no window shows up, double-check the PICO's XRoboToolkit IP configuration in the [VR Teleop Setup](../getting_started/vr_teleop_setup.md). If the script instead reports "Waiting for body tracking data" even though your trackers are on, recalibrate your PICO trackers.
+
+### Your First Teleop Session
+
+1. **Assume the calibration pose** — stand upright, feet together, upper arms at your sides, forearms bent 90° forward (L-shape at each elbow), palms inward. See [Calibration Pose](#calibration-pose) for details.
+2. Press **A + B + X + Y** simultaneously to engage the control policy and run the initial full calibration (`CALIB_FULL`).
+3. Align your arms with the robot's current pose, then press **A + X** to enter full-body SMPL teleop (**POSE** mode). Move your arms and legs — the robot follows.
+4. Press **A + X** again to fall back to **PLANNER** (idle) mode.
+5. Press **A + B + X + Y** again to stop the robot.
+ +
Basic whole-body teleop workflow: calibration pose → engage → POSE mode → PLANNER idle → stop.
+
+ +--- + +(pico-controls)= + +## Complete PICO Controls + +### Modes & Calibration + +The system has **4 operating modes** and **2 calibration types**. + +**Modes:** + +| Mode | Encoder | Description | +|---|---|---| +| **OFF** | -- | Policy not running. Stand in [calibration pose](#calibration-pose), then press **A+B+X+Y** to start policy. | +| **POSE** | SMPL | Whole-body teleop — streaming the SMPL pose from PICO to the C++ deployment side. Your motion will directly map to the robot .| +| **PLANNER** | G1 | Locomotion planner active; upper body controller by planner. Joysticks control direction and heading in walking and running modes. | +| **PLANNER_FROZEN_UPPER** | G1 | Planner locomotion; upper body frozen at last POSE snapshot. | +| **VR_3PT** | TELEOP | Planner locomotion; upper body follows VR 3-point tracking (head + 2 hands). Depends on non-IK-based VR 3-point calibration. | + +**Calibration types** (non-IK workflow for minimal latency): + +| Type | What Is Calibrated | When Triggered | +|---|---|---| +| **CALIB_FULL** | Head + both wrists against **all-zero** reference pose. | Once on first **A+B+X+Y** (startup). | +| **CALIB** | Both wrists only against the **current robot pose**. | Each switch into VR_3PT via **Left Stick Click**. | + +### State Machine + +There are 4 modes and 2 control chains. Each chain forms a triangle: **A+X** (or **B+Y**) returns to POSE from *both* the planner node and its VR_3PT sub-mode. 
+ +```text + ┌──────────────────────────────────────┐ + │ A+B+X+Y (any mode) ──► OFF │ + └──────────────────────────────────────┘ + + Startup: + OFF ──(A+B+X+Y)──► PLANNER ──(A+X)──► POSE + CALIB_FULL + + Chain 1 — G1 encoder listens to planner-generated full-body motion: PLANNER + + PLANNER ─── L-Stick (+CALIB) ──► VR_3PT + ▲ │ ◄─────── L-Stick ───────── │ + │ │ │ + │A+X │A+X A+X │ + │ ▼ ▼ + └─ POSE ◄──────────────────────────┘ + + Chain 2 — G1 encoder listens to planner-generated lower-body motion: PLANNER_FROZEN_UPPER + + PLANNER_FROZEN_UPPER ── L-Stick (+CALIB) ──► VR_3PT + ▲ │ ◄──────── L-Stick ────────── │ + │ │ │ + │B+Y │B+Y B+Y │ + │ ▼ ▼ + └── POSE ◄───────────────────────────────┘ +``` + +```{admonition} DANGER — Mode-Switching Safety +:class: danger +**Before switching into POSE or VR_3PT**, always align your body with the robot's current pose first!! + +- **POSE:** The robot instantly snaps to your physical pose. A large mismatch causes sudden, aggressive motion. +- **VR_3PT:** A misaligned calibration produces erratic, dangerous motion as soon as you move your arms. Check more info at section [per-switch-calib](#per-switch-calib) +``` + +(calibration-pose)= + +### VR_3PT Calibration Hint + +The `VR_3PT` mode depends on accurate calibration. Two calibration events occur: + +#### One-time `CALIB_FULL` (head + wrists) + +Before pressing **A + B + X + Y** for the first time, you **must** stand in the robot's **all-zero reference pose**. The system captures your PICO body-tracking frame as the zero-reference for all subsequent motion mapping. + +**The reference pose:** +1. **Stand upright**, feet together, looking straight forward. +2. **Upper arms** hang straight down, close to your torso. +3. **Forearms** bent 90° forward (L-shape at each elbow), palms facing inward. + +```{tip} +Launch the teleop script with `--vis_vr3pt` to see the robot's reference pose in a visualization window. Match your body to it before pressing the start combo. 
+``` + +(per-switch-calib)= +#### Per-switch `CALIB` (wrists only) + +Each time you enter `VR_3PT` via **Left Stick Click**, the system re-calibrates both wrists against the robot's **current** pose. Always align your arms with the robot before clicking. + +Below is an example of **bad calibration practice** — transitioning into VR_3PT without aligning your arms to the robot's current pose. The robot may not jump immediately, but will exhibit erratic and dangerous motion as soon as you move. + +
+ +
Bad practice: entering VR_3PT without arm alignment causes erratic, unsafe motion.
+
+ + + + +```{admonition} DANGER — Mode-Switching Safety +:class: danger +**Before switching into POSE or VR_3PT**, always align your body with the robot's current pose first. + +**Recovery from bad VR_3PT calibration:** +1. Freeze the upper body — switch back via **Left Stick Click**. +2. Re-align your arms with the robot's current (possibly distorted) pose. +3. Switch to **POSE** mode (**A+X**) to reset. +``` + +Below is the **recovery procedure** — if you accidentally enter a badly calibrated VR_3PT state, freeze the upper body (Left Stick Click back), then switch to POSE mode (A+X) to reset safely. + +
+ +
Recovery from bad VR_3PT calibration: freeze upper body → re-align → switch to POSE mode.
+
+ +### Quick-Start Cheatsheet + +| Action | Button | Notes | +|---|---|---| +| **Start / Stop policy** | **A+B+X+Y** | First press: engage + CALIB_FULL. Again: emergency stop → OFF. | +| **Toggle POSE** | **A+X** | Switches between PLANNER ↔ POSE. OR from VR_3PT (entered via PLANNER) → POSE. | +| **Toggle PLANNER_FROZEN_UPPER** | **B+Y** | Switches between POSE ↔ PLANNER_FROZEN_UPPER. OR from VR_3PT (entered via PLANNER_FROZEN_UPPER) → POSE. | +| **Toggle VR_3PT** | **Left Stick Click** | From any Planner mode → VR_3PT (triggers CALIB). Click again to return. | +| **Hand grasp** | **Trigger** (per hand) | Controls the corresponding hand's grasp. | + +### Joystick Controls (Planner Modes) + +Active in **PLANNER**, **PLANNER_FROZEN_UPPER**, and **VR_3PT**: + +| Input | Function | +|---|---| +| **Left Stick** | Move direction (forward / backward / strafe) | +| **Right Stick (horizontal)** | Yaw / heading (continuous accumulation) | +| **A + B** | Next locomotion mode | +| **X + Y** | Previous locomotion mode | + +**Locomotion modes** (cycled via A+B / X+Y): + +| ID | Mode | +|---|---| +| 0 | Idle [DEFAULT] | +| 1 | Slow Walk | +| 2 | Walk | +| 3 | Run | +| 4 | Squat | +| 5 | Kneel (two legs) | +| 6 | Kneel | +| 7 | Lying face-down | +| 8 | Crawling | +| 9–16 | Boxing variants (idle, walk, punches, hooks) | +| 17 | Forward Jump | +| 18 | Stealth Walk | +| 19 | Injured Walk | + +--- + +## Emergency Stop + +| Method | Action | +|---|---| +| **PICO controllers** | Press **A+B+X+Y** simultaneously → OFF | +| **Keyboard** (C++ terminal) | Press **`O`** for immediate stop | + +--- + +## Step-by-Step: Teleop on Real Robot + +```{admonition} Safety Warning +:class: danger +Only proceed once you can smoothly control the robot in simulation and are comfortable with emergency stops. **Terminate any running `run_sim_loop.py` process first** — simultaneous sim and real instances will conflict. +``` + +The real-robot workflow uses **two terminals** (no MuJoCo simulator). 
+ +### Terminal 1 — C++ Deployment (Real Robot) + +From `gear_sonic_deploy/`: + +```bash +cd gear_sonic_deploy +source scripts/setup_env.sh + +# 'real' auto-detects the robot network interface (192.168.123.x). +# If auto-detection fails, pass the G1's IP directly: +# ./deploy.sh --input-type zmq_manager +./deploy.sh real --input-type zmq_manager + +# Wait until you see "Init done" +``` + +```{note} +If the teleop script (Terminal 2) runs on a different machine, add `--zmq-host ` so the C++ side knows where the ZMQ publisher is. +``` + +### Terminal 2 — PICO Teleop Streamer + +From the **repo root**: + +```bash +source .venv_teleop/bin/activate +python gear_sonic/scripts/pico_manager_thread_server.py --manager + +# If running offboard with a display, add visualization: +# --vis_vr3pt --vis_smpl +``` + +```{note} +Update the IP in the PICO's XRoboToolKit app to match this machine before starting. +``` + +Follow the same start sequence: calibration pose → **A+B+X+Y** → **A+X** for POSE mode. See [Complete PICO Controls](#pico-controls) for all available commands. diff --git a/docs/source/tutorials/zmq.md b/docs/source/tutorials/zmq.md new file mode 100644 index 0000000..14c29a8 --- /dev/null +++ b/docs/source/tutorials/zmq.md @@ -0,0 +1,309 @@ +# Streaming Motion Tracking + +Stream motion data to the robot over ZMQ for reference motion tracking. This interface supports streaming either **SMPL-based poses** (e.g., from PICO) or **G1 whole-body joint positions** (qpos) from any external source (`--input-type zmq`). + +```{admonition} Prerequisites +:class: note +Complete the [Quick Start](../getting_started/quickstart.md) to have the sim2sim loop running. +``` + +```{admonition} Emergency Stop +:class: danger +Press **`O`** at any time to immediately stop control and exit. Always keep a hand near the keyboard ready to press **`O`**. 
+``` + +## Launch + +**Sim2Sim (MuJoCo):** + +```bash +# Terminal 1 — MuJoCo simulator (from repo root) +source .venv_sim/bin/activate +python gear_sonic/scripts/run_sim_loop.py + +# Terminal 2 — C++ deployment (from gear_sonic_deploy/) +bash deploy.sh sim --input-type zmq \ + --zmq-host \ + --zmq-port 5556 \ + --zmq-topic pose +``` + +**Real Robot:** + +```bash +# From gear_sonic_deploy/ +bash deploy.sh real --input-type zmq \ + --zmq-host \ + --zmq-port 5556 \ + --zmq-topic pose +``` + +## Step-by-Step + +1. Press **`]`** to start the control system. +2. By default you are in **reference motion mode** — use **`T`** to play motions, **`N`** / **`P`** to switch, **`R`** to restart (same as the [keyboard interface](keyboard.md)). +3. Press **`ENTER`** to toggle into **ZMQ streaming mode**. The terminal will print `ZMQ STREAMING MODE: ENABLED`. +4. The policy now tracks motion frames arriving from the ZMQ publisher in real time. Playback starts automatically. +5. Press **`ENTER`** again to switch back to reference motions. The terminal will print `ZMQ STREAMING MODE: DISABLED`, and the encode mode resets to `0` (joint-based). +6. Use **`Q`** / **`E`** to adjust the heading (±0.1 rad per press) in either mode. +7. Press **`I`** to reinitialize the base quaternion and reset the heading to zero. +8. When done, press **`O`** to stop control and exit. + +```{note} +**No planner support** — this interface uses pre-loaded and ZMQ-streamed reference motions only. For planner + ZMQ control (e.g., PICO VR teleoperation), use `--input-type zmq_manager` instead. See the [VR Whole-Body Teleop tutorial](vr_wholebody_teleop.md). +``` + +```{tip} +**Build your own streaming source.** The ZMQ stream protocol documented below is self-contained — any publisher that sends messages in this format can drive the robot. You can write your own motion capture retargeting pipeline, simulator bridge, or any other source that produces the required fields. No PICO hardware is needed. 
+``` + +## Using with PICO VR Teleop + +You can use `--input-type zmq` with the PICO teleop streamer for a simple, streaming-only whole-body teleoperation setup. In this mode, the PICO streams full-body SMPL poses over ZMQ and the deployment side tracks them directly — no locomotion planner, no PICO-button mode switching. All control is done from the keyboard. + +### Prerequisites + +1. **Completed the [Quick Start](../getting_started/quickstart.md)** — you can run the sim2sim loop. +2. **PICO VR hardware is set up** — headset and controllers are connected, body tracking is working, and `.venv_teleop` is installed. See the [VR Teleop Setup](../getting_started/vr_teleop_setup.md) for installation and calibration. + +### Launch (Sim2Sim) + +Run three terminals: + +**Terminal 1 — MuJoCo simulator** (from repo root): + +```bash +source .venv_sim/bin/activate +python gear_sonic/scripts/run_sim_loop.py +``` + +**Terminal 2 — C++ deployment** (from `gear_sonic_deploy/`): + +```bash +bash deploy.sh sim --input-type zmq \ + --zmq-host localhost \ + --zmq-port 5556 \ + --zmq-topic pose +``` + +**Terminal 3 — PICO teleop streamer** (from repo root): + +```bash +source .venv_teleop/bin/activate + +# With visualization (recommended for first run): +python gear_sonic/scripts/pico_manager_thread_server.py \ + --manager --vis_smpl --vis_vr3pt + +# Without visualization (headless): +# python gear_sonic/scripts/pico_manager_thread_server.py --manager +``` + +### Launch (Real Robot) + +Run two terminals (no MuJoCo): + +**Terminal 1 — C++ deployment** (from `gear_sonic_deploy/`): + +```bash +bash deploy.sh real --input-type zmq \ + --zmq-host \ + --zmq-port 5556 \ + --zmq-topic pose +``` + +Replace `` with `localhost` if the PICO streamer runs on the same machine, or the IP of the machine running Terminal 2. 
+ +**Terminal 2 — PICO teleop streamer** (from repo root): + +```bash +source .venv_teleop/bin/activate +python gear_sonic/scripts/pico_manager_thread_server.py --manager +``` + +### Step-by-Step + +1. **Calibration pose**: Stand upright, feet together, upper arms at your sides, forearms bent 90° forward (L-shape at each elbow), palms facing inward. +2. On the PICO controllers, press **A + B + X + Y** simultaneously to initialize and calibrate the body tracking. +3. Press **A + X** on the PICO controllers to start streaming poses. +4. In Terminal 2 (C++ deployment), press **`]`** to start the control system. +5. In the MuJoCo window (sim only), press **`9`** to drop the robot to the ground. +6. Back in Terminal 2, press **`ENTER`** to enable ZMQ streaming. The terminal prints `ZMQ STREAMING MODE: ENABLED`. The robot begins tracking your PICO poses in real time. +7. Move your body — the robot mirrors your motions. Use the **Trigger** button on each PICO controller to close the corresponding robot hand. +8. To **pause** streaming (e.g., to reposition yourself), press **`ENTER`** again. The terminal prints `ZMQ STREAMING MODE: DISABLED`. The robot holds its last pose and stops tracking. You can move freely without affecting the robot. +9. To **resume**, press **`ENTER`** once more. The robot will snap to your current pose — **move back close to the robot's current pose before resuming** to avoid sudden jumps. +10. When done, press **`O`** to stop control and exit. + +```{admonition} DANGER — Resuming from Pause +:class: danger +When you press **`ENTER`** to resume streaming after a pause, the robot will immediately try to reach your current physical pose. If your body is in a very different position from the robot, the robot may perform sudden, aggressive motions. 
**Always move back close to the robot's current pose before pressing `ENTER` to resume.** +``` + +### PICO Buttons in ZMQ Mode + +In `--input-type zmq` mode, the C++ deployment side does **not** process PICO controller button combos directly. However, the buttons still affect the **Python streamer**, which controls what data gets published on the `pose` ZMQ topic. Since the deployment side tracks whatever arrives (or stops arriving) on that topic, several buttons still have an indirect effect on the robot. + +| PICO Button | Effect | +|-------------|--------| +| **A + B + X + Y** | Calibrate body tracking in the streamer. Press once to initialize; press again to stop streaming (emergency stop on the streamer side). | +| **A + X** | Toggle Pose mode in the streamer — starts or stops publishing pose data. When stopped, the robot holds its last pose. **Works as pause/resume.** | +| **Menu (hold)** | Pauses pose streaming in the streamer while held. The robot holds its last pose until you release. **Works as pause.** Move back close to the robot's current pose before releasing. | +| **Trigger** | Hand grasp — processed by the streamer and sent as `left_hand_joints` / `right_hand_joints` in the stream. | +| **B + Y** | Toggle Pose mode in the streamer (same effect as A+X) — starts or stops publishing pose data. **Works as pause/resume.** | + +All mode control on the deployment side is done from the keyboard: + +| Key | Action | +|-----|--------| +| **`]`** | Start control system | +| **`ENTER`** | Toggle streaming on/off (pause/resume) | +| **`O`** | Emergency stop — stop control and exit | +| **`I`** | Reinitialize base quaternion and reset heading | +| **`Q`** / **`E`** | Adjust heading (±0.1 rad) | + +```{note} +For the full PICO VR experience with planner support, locomotion modes, and PICO-controller-based mode switching, use `--input-type zmq_manager` instead. See the [VR Whole-Body Teleop tutorial](vr_wholebody_teleop.md). 
+``` + +## Controls + +| Key | Action | +|-----|--------| +| **]** | Start control system | +| **O** | Stop control and exit (emergency stop) | +| **ENTER** | Toggle between reference motions and ZMQ streaming | +| **I** | Reinitialize base quaternion and reset heading | +| **Q** / **E** | Adjust delta heading left / right (±0.1 rad) | + +*Reference motion mode only (streaming off):* + +| Key | Action | +|-----|--------| +| **T** | Play current motion to completion | +| **R** | Restart current motion from beginning (pause at frame 0) | +| **P** / **N** | Previous / Next motion sequence | + +## Stream Protocol Versions + +The encode mode is determined automatically by the ZMQ stream protocol version. **SONIC uses Protocol v1 and v3.** Protocol v2 is available for custom applications. + +### Encode Mode Logic + +The encode mode only takes effect when the policy model has an **encoder** configured and loaded. At startup, each motion's encode mode is initialized based on encoder availability: + +| `encode_mode` | Meaning | +|----------------|---------| +| `-2` | No encoder / token state configured in the model — encode mode has no effect | +| `-1` | Encoder config exists (token state dimension > 0) but no encoder model file provided | +| `0` | Encoder loaded, joint-based mode (default) | +| `1` | Encoder loaded, teleop / 3 points upper-body mode | +| `2` | Encoder loaded, SMPL-based mode | + +When ZMQ streaming is active, the protocol version sets the encode mode on the streamed motion: v1 → `0`, v2/v3 → `2`. This only affects inference if the model actually has an encoder (`encode_mode >= 0`). If no encoder is configured (`-2`), the value is set but has no effect on the inference pipeline. + +When switching back to reference motions (pressing **ENTER** to disable streaming), the encode mode resets to `0` (if the motion has an encoder, i.e. `encode_mode >= 0`). 
+
+### Common Fields (All Versions)
+
+All versions require two common fields:
+
+| Field | Shape | Dtype | Description |
+|-------|-------|-------|-------------|
+| `body_quat` | `[N, 4]` or `[N, num_bodies, 4]` | `f32` / `f64` | Body quaternion(s) per frame (w, x, y, z) |
+| `frame_index` | `[N]` | `i32` / `i64` | Monotonically increasing frame indices for alignment |
+
+```{warning}
+Changing the protocol version mid-session is not allowed. If the publisher switches protocol versions while streaming, the interface will automatically disable ZMQ mode and return to reference motions for safety.
+
+Error message: `Protocol version changed from X to Y during active ZMQ session!`
+```
+
+### Protocol v1 — Joint-Based (Encode Mode 0)
+
+Streams raw G1 joint positions and velocities. Use this when your source provides direct qpos/qvel data (e.g., from another simulator or motion capture retargeting pipeline).
+
+**Required fields:**
+
+| Field | Shape | Dtype | Description |
+|-------|-------|-------|-------------|
+| `joint_pos` | `[N, 29]` | `f32` / `f64` | Joint positions in IsaacLab order (all 29 joints) |
+| `joint_vel` | `[N, 29]` | `f32` / `f64` | Joint velocities in IsaacLab order (all 29 joints) |
+
+- `N` = number of frames per message (batch size).
+- All 29 joint values must be provided and meaningful.
+- Frame counts of `joint_pos` and `joint_vel` must match.
+
+**Common errors:**
+- `Version 1 missing required fields (joint_pos, joint_vel)` — one or both fields are absent.
+- `Frame count mismatch between joint_pos and joint_vel` — the `N` dimension differs.
+
+### Protocol v2 — SMPL-Based (Encode Mode 2)
+
+Streams SMPL body model data. This protocol is **not used by SONIC's built-in pipelines** — it is available for your own custom applications that produce SMPL representations, for example a policy that only observes the SMPL representation. 
+ +**Required fields:** + +| Field | Shape | Dtype | Description | +|-------|-------|-------|-------------| +| `smpl_joints` | `[N, 24, 3]` | `f32` / `f64` | SMPL joint positions (24 joints × xyz) | +| `smpl_pose` | `[N, 21, 3]` | `f32` / `f64` | SMPL joint rotations in axis-angle (21 body poses × xyz) | + +- `joint_pos` and `joint_vel` are **optional** in v2. + +**Common errors:** +- `Version 2 missing required field 'smpl_joints'` or `'smpl_pose'` — required SMPL fields are absent. + +### Protocol v3 — Joint + SMPL Combined (Encode Mode 2) + +Combines both joint-level and SMPL data. This is what SONIC uses for whole-body teleoperation (e.g., PICO VR). + +**Required fields:** + +| Field | Shape | Dtype | Description | +|-------|-------|-------|-------------| +| `joint_pos` | `[N, 29]` | `f32` / `f64` | Joint positions in IsaacLab order | +| `joint_vel` | `[N, 29]` | `f32` / `f64` | Joint velocities in IsaacLab order | +| `smpl_joints` | `[N, 24, 3]` | `f32` / `f64` | SMPL joint positions (24 joints × xyz) | +| `smpl_pose` | `[N, 21, 3]` | `f32` / `f64` | SMPL joint rotations in axis-angle (21 body poses × xyz) | + +```{important} +In Protocol v3, **only the 6 wrist joints need meaningful values** in `joint_pos` — the remaining 23 joints can be zero. The wrist joint indices (in IsaacLab order) are: **[23, 24, 25, 26, 27, 28]** (3 joints per wrist × 2 wrists). The `joint_vel` values for non-wrist joints can also be zero. + +The SMPL fields (`smpl_joints`, `smpl_pose`) carry the primary motion data in v3; the wrist joints in `joint_pos` provide fine-grained wrist control that SMPL alone cannot capture. +``` + +- Frame counts across all four fields must be consistent. + +**Common errors:** +- `Version 3 missing required field 'joint_pos'` or `'joint_vel'` — joint fields are absent (unlike v2, they are required in v3). +- `Version 3 frame count mismatch between smpl_joints (X) and joint_pos (Y)` — the `N` dimension differs across fields. 
+ +### Protocol Summary + +| Protocol | Encode Mode | Used by SONIC | Required Fields | +|----------|-------------|---------------|-----------------| +| v1 | `0` (joint-based) | ✅ Yes | `joint_pos`, `joint_vel` | +| v2 | `2` (SMPL-based) | ❌ Custom only | `smpl_joints`, `smpl_pose` | +| v3 | `2` (SMPL-based) | ✅ Yes | `joint_pos`, `joint_vel`, `smpl_joints`, `smpl_pose` | + +## Optional Stream Fields + +The following optional fields can be included in any protocol version: + +| Field | Shape | Dtype | Description | +|-------|-------|-------|-------------| +| `left_hand_joints` | `[7]` or `[1, 7]` | `f32` / `f64` | Left hand 7-DOF Dex3 joint positions | +| `right_hand_joints` | `[7]` or `[1, 7]` | `f32` / `f64` | Right hand 7-DOF Dex3 joint positions | +| `vr_position` | `[9]` or `[3, 3]` | `f32` / `f64` | VR 3-point tracking positions: left wrist, right wrist, head (xyz × 3) | +| `vr_orientation` | `[12]` or `[3, 4]` | `f32` / `f64` | VR 3-point orientations: left, right, head quaternions (wxyz × 3) | +| `catch_up` | scalar | `bool` / `u8` / `i32` | If `true` (default), resets playback when a large frame gap is detected | +| `heading_increment` | scalar | `f32` / `f64` | Incremental heading adjustment applied per message | + +## Configuration + +| Flag | Default | Description | +|------|---------|-------------| +| `--zmq-host` | `localhost` | ZMQ publisher host | +| `--zmq-port` | `5556` | ZMQ publisher port | +| `--zmq-topic` | `pose` | ZMQ topic prefix | +| `--zmq-conflate` | off | Keep only the latest message (drop stale frames) | diff --git a/docs/source/user_guide/configuration.md b/docs/source/user_guide/configuration.md new file mode 100644 index 0000000..c508229 --- /dev/null +++ b/docs/source/user_guide/configuration.md @@ -0,0 +1,3 @@ +# Configuration Guide + +Coming soon... 
diff --git a/docs/source/user_guide/teleoperation.md b/docs/source/user_guide/teleoperation.md
new file mode 100644
index 0000000..08faeb1
--- /dev/null
+++ b/docs/source/user_guide/teleoperation.md
@@ -0,0 +1,245 @@
+# Whole-body Teleoperation Guide
+
+This guide covers best practices during whole-body teleoperation.
+
+```{admonition} Prerequisites
+:class: note
+Complete the [Quick Start](../getting_started/quickstart), [PICO Setup](../getting_started/vr_teleop_setup), and [Teleop Setup](../tutorials/vr_wholebody_teleop)
+```
+
+## Overview
+
+Whole-body teleoperation is very hard to get right and there are a lot of moving parts. This guide will walk you through the details that are required during whole-body teleoperation. During whole-body teleoperation, the GEAR-SONIC policy will try to copy your motion **as much as possible**, including your foot movements! Thus, it can be quite **demanding and dangerous** if proper precautions are not taken.
+
+```{admonition} Safety Warning
+:class: danger
+The robot will track your full-body movements in real-time. Always maintain a clear 3-meter safety zone around the robot, keep a safety operator at the keyboard ready to press **`O`** for emergency stop and always be prepared to emergency stop on the PICO controller on your own. Practice extensively in simulation before attempting on real hardware! 
+``` + +## Sample Teleoperation Session + +A typical whole-body teleoperation session follows this workflow: + + +```{video} ../_static/teleop/teleop_session_overview.mp4 +:width: 100% +``` +*Video: Full startup sequence — calibrating the PICO headset, engaging the policy, and the robot starting to balance independently on the gantry.* + +**Terminal 1 — MuJoCo Simulator** (or skip for real robot): +```bash +source .venv_teleop/bin/activate +python gear_sonic/scripts/run_sim_loop.py +``` + +**Terminal 2 — C++ Deployment**: +```bash +cd gear_sonic_deploy +# For simulation: +bash deploy.sh sim --input-type zmq_manager +# For real robot: +# bash deploy.sh real --input-type zmq_manager +``` + +**Terminal 3 — PICO Teleop Streamer**: +```bash +source .venv_teleop/bin/activate +python gear_sonic/scripts/pico_manager_thread_server.py --manager +``` + +**Operator Actions**: +1. **Put on PICO headset and controllers** — Ensure foot trackers are securely attached. +2. **Stand in calibration pose** — Upright, feet together, arms in down. Recalibrate often!!! +3. **Make robot stand loose but standing** - Put the G1 somehow slack on gantry (the policy will start and start balancing on its own). +4. **Press A+B+X+Y** on controllers — Initializes the policy and calibrates (enters Planner mode) +5. **Press A+X** — Switches to Pose mode (whole-body teleoperation active) +6. **Teleoperate** — Your movements are now mirrored by the robot +7. **Press A+B+X+Y** when done — Emergency stop and exit. Policy will stop!!! + +## Clothing Requirements + +**Critical:** You **must** wear **tight-fitting pants or leggings** during teleoperation. 
+
+**Why this matters:**
+- The PICO foot trackers use visual tracking and need a clear view of your leg/foot movements
+- Loose, baggy, or flowing clothing (sweatpants, wide-leg pants, long skirts) will occlude the trackers
+- Even brief tracking loss causes the robot to receive incorrect foot positions, leading to stumbling or dangerous motions
+
+**Recommended:**
+- ✅ Athletic leggings or compression tights
+- ✅ Fitted jeans or slim-fit pants
+
+**Not recommended:**
+- ❌ Baggy sweatpants or cargo pants
+- ❌ Wide-leg or flared pants
+- ❌ Long dresses or skirts
+- ❌ Any loose or flowing fabric around the legs
+
+**Upper body:** Normal fitted clothing is fine. Avoid very baggy sleeves that might interfere with controller tracking.
+
+**❗️❗️❗️Recalibrate Often** The tracking quality of PICO may get worse over time. When seeing performance drop, always recalibrate! Also, when the PICO controller loses track, it may get stuck in a sitting/weird pose. Always make sure your tracked motion is correct (by viewing the PICO avatar) before starting the teleoperation policy!
+
+## WiFi Delays
+
+```{video} ../_static/teleop/teleop_natural_movement.mp4
+:width: 100%
+```
+*Video: Demonstrating how natural vs. hesitant walking affects robot stability — stumbling caused by WiFi delays or unnatural movement.*
+
+**Network latency should be kept as low as possible.** We provide tools to detect delays. Network delays can significantly affect the GEAR-SONIC policy's performance, as the flow of movement for the robot will be interrupted mid-movement (say mid-stride during walking) and the robot can stumble or lose balance.
+
+**Best practices:**
+- **Use Private WiFi Routers** — Public and school Wi-Fi networks can easily have large delays. 
+
+- **Minimize WiFi hops** — Ideally the PICO and deployment machine are on the same local network
+- **Check latency** — Monitor ZMQ message delays in the terminal output
+
+
+**Expected latency:**
+- **Good:** < 10ms (wired or strong local WiFi)
+- **Acceptable:** 10-30ms (may notice slight lag)
+- **Poor:** > 30ms (robot will struggle to track smooth motions, increased stumbling risk)
+
+**Checking network performance:**
+The deployment terminal will show warnings if message delays exceed thresholds. Watch for messages like:
+```
+WARNING: High ZMQ latency detected: 45ms
+```
+
+If you see persistent high latency warnings, improve your network setup before continuing.
+
+## Movement Patterns
+
+```{video} ../_static/teleop/teleop_walking.mp4
+:width: 100%
+```
+*Video: Walking demo — forward, backward, running, and sideways movement with natural gait.*
+
+**Try to be as natural as possible.** The GEAR-SONIC policy is trained on natural human motion, so moving naturally gives the best results. Hesitating when moving actually leads to more stumbling!
+
+**Good movement practices:**
+
+1. **Walk naturally** — Use your normal gait with natural arm swing. Don't exaggerate or try to "help" the robot.
+
+2. **Transfer weight smoothly** — When stepping, shift your weight from one foot to the other just as you normally would. Be confident!
+
+3. **Use moderate speeds** — The robot can track fast motions, but start with walking at a comfortable pace. You can speed up / start running once you're comfortable.
+
+**Common mistakes:**
+
+- ❌ **Overly slow movements** — Don't slow down too much!
+- ❌ **Trying to match the robot's movement** — Don't try to match the robot movement, as the robot will then try to match your movement and start stumbling. Be natural! 
+ +**Advanced movements:** + +```{video} ../_static/teleop/teleop_advanced.mp4 +:width: 100% +``` +*Video: Advanced movements — kneeling, dynamic leg motions, and challenging poses.* + +Once comfortable with basic walking: +- **Turning** — Turn naturally by rotating your torso and stepping in the new direction +- **Reaching** — Extend your arms smoothly to grab objects +- **Squatting** — Bend your knees and lower your body naturally +- **Sidestepping** — Step sideways with natural weight transfer + +## Calibration Best Practices + +The initial calibration is **critical** to successful teleoperation. + +**The calibration pose:** +1. **Stand upright** — Look straight ahead (not down at controllers) +2. **Feet together** — Foot parallel with no gaps. +3. **Upper arms down** — Hang straight down beside your torso + + +**Tips:** +- Hold the pose steady for 1-2 seconds after pressing A+B+X+Y +- If the robot seems offset throughout the session, recalibrate (stop with A+B+X+Y, then restart) + + +## Mode Switching Safety + +When switching between modes, **always match the robot's current pose first**. + +**Dangerous scenario:** +1. Robot is in Planner mode standing upright +2. You're crouching or reaching in a different pose +3. You press **A+X** to switch to Pose mode +4. **Robot violently tries to match your crouched pose** ⚠️ + +**Safe procedure:** +1. Before pressing **A+X**, look at the robot (or visualization) +2. Move your body to approximately match the robot's current pose +3. 
Then press **A+X** — transition will be smooth
+
+**Pause feature (Menu button):**
+
+```{video} ../_static/teleop/teleop_pause.mp4
+:width: 100%
+```
+*Video: Using the Menu button to pause and resume pose streaming during teleoperation.*
+
+- Holding **Menu** pauses pose streaming
+- **Before releasing Menu**, move your body back to match the robot's current pose
+- Releasing Menu while in a very different pose causes sudden dangerous motions
+
+## Troubleshooting
+
+### Robot is not tracking my movements
+
+**Possible causes:**
+- Foot trackers are not securely attached or have low battery
+- Loose clothing is occluding the trackers
+- Poor lighting conditions
+- XRoboToolKit not running on PICO or configured incorrectly
+
+**Solutions:**
+1. Check foot tracker placement and battery level
+2. Verify you're wearing tight-fitting pants
+3. Improve lighting (avoid very bright or very dark areas)
+4. Restart XRoboToolKit on the PICO headset
+5. Recalibrate
+
+### Robot makes sudden aggressive motions
+
+**Possible causes:**
+- Switched modes while poses didn't match
+- Tracking glitch or foot tracker occlusion
+- Network packet loss causing delayed frames
+
+**Solutions:**
+1. Recalibrate carefully before resuming
+2. Always match robot's pose before switching modes
+3. Check network latency and improve WiFi/wired connection
+
+### Tracking is jittery or stumbles
+
+**Possible causes:**
+- Wireless delays
+- IMU drift
+- Joint encoder drift
+
+
+**Solutions:**
+1. Reduce WiFi interference (move away from other wireless devices)
+2. Recalibrate the robot
+
+
+## Emergency Procedures
+
+### Emergency stop methods
+
+**Keyboard (deployment terminal):**
+- Press **`O`** for immediate stop
+
+**PICO controllers:**
+- Press **A + B + X + Y** simultaneously
+
+Both methods immediately halt the policy and exit control mode. 
+ + +## Next Steps + +- **Understand input interfaces** — See tutorials for [Keyboard](../tutorials/keyboard.md), [Gamepad](../tutorials/gamepad.md), [ZMQ](../tutorials/zmq.md), [Manager](../tutorials/manager.md) +- **Learn about deployment** — See [Deployment Code & Program Flow](../references/deployment_code) +- **General troubleshooting** — See [Troubleshooting Guide](troubleshooting) \ No newline at end of file diff --git a/docs/source/user_guide/training.md b/docs/source/user_guide/training.md new file mode 100644 index 0000000..7a25745 --- /dev/null +++ b/docs/source/user_guide/training.md @@ -0,0 +1,3 @@ +# Training Guide + +Coming soon... diff --git a/docs/source/user_guide/troubleshooting.md b/docs/source/user_guide/troubleshooting.md new file mode 100644 index 0000000..9ca7a0e --- /dev/null +++ b/docs/source/user_guide/troubleshooting.md @@ -0,0 +1,5 @@ +# Troubleshooting + +Common issues and solutions. + +Coming soon... diff --git a/download_from_hf.py b/download_from_hf.py new file mode 100644 index 0000000..92e7f8d --- /dev/null +++ b/download_from_hf.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +""" +Download GEAR-SONIC model checkpoints from Hugging Face Hub. 
+ +Repository: https://huggingface.co/nvidia/GEAR-SONIC + +Usage: + python download_from_hf.py + python download_from_hf.py --output-dir /path/to/output + python download_from_hf.py --no-planner +""" + +import argparse +import shutil +import sys +from pathlib import Path + +REPO_ID = "nvidia/GEAR-SONIC" + +# (filename in HF repo, local destination relative to output_dir) +POLICY_FILES = [ + ("model_encoder.onnx", "policy/release/model_encoder.onnx"), + ("model_decoder.onnx", "policy/release/model_decoder.onnx"), + ("observation_config.yaml", "policy/release/observation_config.yaml"), +] + +PLANNER_FILE = ("planner_sonic.onnx", "planner/target_vel/V2/planner_sonic.onnx") + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Download GEAR-SONIC checkpoints from Hugging Face Hub" + ) + parser.add_argument( + "--output-dir", + type=Path, + default=None, + help=( + "Directory to save files. Defaults to gear_sonic_deploy/ " + "next to this script." + ), + ) + parser.add_argument( + "--no-planner", + action="store_true", + help="Skip downloading the kinematic planner ONNX model", + ) + parser.add_argument( + "--token", + default=None, + help="Hugging Face token (or set HF_TOKEN env var / run huggingface-cli login)", + ) + return parser.parse_args() + + +def _ensure_huggingface_hub(): + try: + from huggingface_hub import hf_hub_download + return hf_hub_download + except ImportError: + print("huggingface_hub is not installed. 
Install it with:") + print(" pip install huggingface_hub") + sys.exit(1) + + +def download_file(hf_hub_download, repo_id, hf_filename, local_dest, token=None): + """Download hf_filename from the Hub and place it at local_dest.""" + print(f" Downloading {hf_filename} ...", flush=True) + cached = hf_hub_download( + repo_id=repo_id, + filename=hf_filename, + token=token, + ) + local_dest.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(cached, local_dest) + print(f" -> {local_dest}") + + +def main(): + args = parse_args() + hf_hub_download = _ensure_huggingface_hub() + + repo_root = Path(__file__).resolve().parent + output_dir = args.output_dir if args.output_dir else repo_root / "gear_sonic_deploy" + + print("=" * 56) + print(" GEAR-SONIC — Hugging Face Model Downloader") + print(f" Repository : {REPO_ID}") + print(f" Output dir : {output_dir}") + print("=" * 56) + + print("\n[Policy]") + for hf_filename, local_rel in POLICY_FILES: + download_file(hf_hub_download, REPO_ID, hf_filename, output_dir / local_rel, token=args.token) + + if not args.no_planner: + print("\n[Planner]") + hf_filename, local_rel = PLANNER_FILE + download_file(hf_hub_download, REPO_ID, hf_filename, output_dir / local_rel, token=args.token) + + print("\n" + "=" * 56) + print(" Done! Files saved under:") + print(f" {output_dir}") + print("=" * 56) + + +if __name__ == "__main__": + main() diff --git a/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/CLAUDE.md b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/CLAUDE.md new file mode 100644 index 0000000..6cedca5 --- /dev/null +++ b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/CLAUDE.md @@ -0,0 +1,108 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. 
+ +## Project Overview + +This project provides Python bindings for the XRoboToolkit PC Service SDK, enabling Python applications to extract XR state data including controller poses, hand tracking, and body motion capture from XR devices (primarily PICO headsets). + +## Architecture + +The project consists of: + +- **Core C++ Bindings** (`bindings/py_bindings.cpp`): Pybind11-based C++ module that wraps the PXREARobotSDK +- **SDK Integration**: Uses the XRoboToolkit-PC-Service SDK (cloned from external repository) +- **Build System**: CMake-based build with Python setuptools integration +- **Multi-platform Support**: Linux (x86_64/aarch64) and Windows + +Key components: +- `PXREARobotSDK.h`: Main SDK header providing device connectivity and data parsing +- `py_bindings.cpp`: Thread-safe C++ wrapper with mutex-protected global state variables +- JSON parsing using nlohmann/json for device state data +- Callback-based data updates from the SDK + +## Build Commands + +### Ubuntu/Linux Setup and Build +```bash +# Full setup (downloads dependencies and builds) +bash setup_ubuntu.sh + +# Manual build after setup +python setup.py install + +# Clean build artifacts +python setup.py clean +``` + +### Windows Setup and Build +```batch +# Full setup (downloads dependencies and builds) +setup_windows.bat + +# Manual build after setup +python setup.py install +``` + +### Development Commands +```bash +# Uninstall existing package +pip uninstall -y xrobotoolkit_sdk + +# Install pybind11 dependency +conda install -c conda-forge pybind11 +# or +pip install pybind11 + +# Build and install +python setup.py install +``` + +## Data Flow and Threading + +The SDK uses a callback-based architecture: +- `OnPXREAClientCallback`: Main callback function that receives JSON data from connected devices +- Global state variables (poses, button states, etc.) 
are updated in real-time +- Thread-safe access via mutex locks for each data category +- Data parsing from comma-separated pose strings to arrays + +## Key Functions and Data Types + +### Controller Data +- Poses: `std::array` (x,y,z,qx,qy,qz,qw) +- Buttons: Menu, Primary, Secondary, Axis Click +- Analog: Trigger, Grip, Axis (x,y) + +### Hand Tracking +- 26 joints per hand with 7 values each (position + quaternion) +- Hand scale factor + +### Body Tracking +- 24 body joints with pose, velocity, acceleration data +- IMU timestamps for each joint +- Availability flag for body tracking system + +## Dependencies + +### Required +- pybind11 (Python binding framework) +- CMake (build system) +- XRoboToolkit-PC-Service SDK (automatically downloaded during setup) + +### Platform-specific Libraries +- Linux: `libPXREARobotSDK.so` +- Windows: `PXREARobotSDK.dll` and `PXREARobotSDK.lib` + +## Testing + +No formal test suite is included. Test functionality using the example scripts in `examples/`: +- `example.py`: Basic controller and headset pose testing +- `example_body_tracking.py`: Body tracking functionality +- `run_binding_continuous.py`: Continuous data capture + +## Important Notes + +- The SDK requires active XR device connection (PICO headset) +- Body tracking requires at least two Pico Swift devices +- All data access is thread-safe but real-time dependent on device connectivity +- The project builds a Python extension module that must be installed to site-packages \ No newline at end of file diff --git a/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/CMakeLists.txt b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/CMakeLists.txt new file mode 100644 index 0000000..654b7b6 --- /dev/null +++ b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/CMakeLists.txt @@ -0,0 +1,54 @@ +cmake_minimum_required(VERSION 3.14) + +project(MyPybind11Project LANGUAGES CXX) + +set(CMAKE_CXX_STANDARD 17) 
+set(CMAKE_CXX_STANDARD_REQUIRED ON) + +cmake_host_system_information(RESULT ISA_NAME QUERY OS_PLATFORM) # Added: Important for UNIX specific logic +message(STATUS "OS_PLATFORM (ISA_NAME): ${ISA_NAME}") + +include(GNUInstallDirs) # Add this line +find_package(pybind11 REQUIRED) + +# Python Bindings for py_bindings.cpp +pybind11_add_module(xrobotoolkit_sdk MODULE bindings/py_bindings.cpp) + +# Link xrobotoolkit_sdk module against pybind11 +target_link_libraries(xrobotoolkit_sdk PRIVATE pybind11::module) + +# Add include directories and link libraries for PXREARobotSDK to xrobotoolkit_sdk target +if(WIN32) + target_include_directories(xrobotoolkit_sdk PUBLIC + ${PROJECT_SOURCE_DIR}/include + ) + target_link_directories(xrobotoolkit_sdk PUBLIC ${PROJECT_SOURCE_DIR}/lib) + target_link_libraries(xrobotoolkit_sdk PUBLIC + PXREARobotSDK.dll # Assuming this is how PXREARobotSDK is linked, mirroring ConsoleDemo + ) +endif() + +if(UNIX) + # ISA_NAME is set by cmake_host_system_information above + if(ISA_NAME STREQUAL "aarch64") + target_include_directories(xrobotoolkit_sdk PUBLIC + ${PROJECT_SOURCE_DIR}/include/aarch64 + ) + target_link_directories(xrobotoolkit_sdk PUBLIC ${PROJECT_SOURCE_DIR}/lib/aarch64) + else() + target_include_directories(xrobotoolkit_sdk PUBLIC + ${PROJECT_SOURCE_DIR}/include + ) + target_link_directories(xrobotoolkit_sdk PUBLIC ${PROJECT_SOURCE_DIR}/lib) + endif() + target_link_libraries(xrobotoolkit_sdk PUBLIC + PXREARobotSDK + ) +endif() + +# Install the Python module +install(TARGETS xrobotoolkit_sdk + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} # Installs to /lib + # You might want a Python-specific path like: + # DESTINATION lib/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages +) diff --git a/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/LICENSE b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/LICENSE new file mode 100644 index 0000000..c0973fd --- /dev/null +++ 
b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 XR Robotics + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/README.md b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/README.md new file mode 100644 index 0000000..9d7a937 --- /dev/null +++ b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/README.md @@ -0,0 +1,169 @@ +# XRoboToolkit-PC-Service-Pybind + +This project provides a python interface to extract XR state using XRoboToolkit-PC-Service sdk. 
+ +## Requirements + +- [`pybind11`](https://github.com/pybind/pybind11) +- [`XRoboRoolkit PC Service`](https://github.com/XR-Robotics/XRoboToolkit-PC-Service#) + +## Building the Project +### Ubuntu 22.04 + +``` +conda remove --name xr --all +conda create -n xr python=3.10 +conda activate xr + +mkdir -p tmp +cd tmp +git clone https://github.com/XR-Robotics/XRoboToolkit-PC-Service.git +cd XRoboToolkit-PC-Service/RoboticsService/PXREARobotSDK +bash build.sh +cd ../../../.. + +mkdir -p lib +mkdir -p include +cp tmp/XRoboToolkit-PC-Service/RoboticsService/PXREARobotSDK/PXREARobotSDK.h include/ +cp -r tmp/XRoboToolkit-PC-Service/RoboticsService/PXREARobotSDK/nlohmann include/nlohmann/ +cp tmp/XRoboToolkit-PC-Service/RoboticsService/PXREARobotSDK/build/libPXREARobotSDK.so lib/ +# rm -rf tmp + +# Build the project +conda install -c conda-forge pybind11 + +pip uninstall -y xrobotoolkit_sdk +python setup.py install +``` +### Linux Ubuntu 22.04 arm64 version (Nvidia orin supported) +``` +bash setup_orin.sh +``` +### Windows + +**Ensure pybind11 is installed before running the following command.** + +``` +setup_windows.bat +``` + +## Using the Python Bindings + +**1. Get Controller and Headset Poses** + +```python +import xrobotoolkit_sdk as xrt + +xrt.init() + +left_pose = xrt.get_left_controller_pose() +right_pose = xrt.get_right_controller_pose() +headset_pose = xrt.get_headset_pose() + +print(f"Left Controller Pose: {left_pose}") +print(f"Right Controller Pose: {right_pose}") +print(f"Headset Pose: {headset_pose}") + +xrt.close() +``` + +**2. 
Get Controller Inputs (Triggers, Grips, Buttons, Axes)** + +```python +import xrobotoolkit_sdk as xrt + +xrt.init() + +# Triggers and Grips +left_trigger = xrt.get_left_trigger() +right_grip = xrt.get_right_grip() +print(f"Left Trigger: {left_trigger}, Right Grip: {right_grip}") + +# Buttons +a_button_pressed = xrt.get_A_button() +x_button_pressed = xrt.get_X_button() +print(f"A Button Pressed: {a_button_pressed}, X Button Pressed: {x_button_pressed}") + +# Axes +left_axis = xrt.get_left_axis() +right_axis_click = xrt.get_right_axis_click() +print(f"Left Axis: {left_axis}, Right Axis Clicked: {right_axis_click}") + +# Timestamp +timestamp = xrt.get_time_stamp_ns() +print(f"Current Timestamp (ns): {timestamp}") + +xrt.close() +``` + +**3. Get hand tracking state** +```python +import xrobotoolkit_sdk as xrt + +xrt.init() + +# Left Hand State +left_hand_tracking_state = xrt.get_left_hand_tracking_state() +print(f"Left Hand State: {left_hand_tracking_state}") + +# Left Hand isActive +left_hand_is_active = xrt.get_left_hand_is_active() +print(f"Left Hand isActive: {left_hand_is_active}") + +# Right Hand State +right_hand_tracking_state = xrt.get_right_hand_tracking_state() +print(f"Right Hand State: {right_hand_tracking_state}") + +# Right Hand isActive +right_hand_is_active = xrt.get_right_hand_is_active() +print(f"Right Hand isActive: {right_hand_is_active}") + +xrt.close() +``` + +**4. 
Get whole body motion tracking (please refer to this example when check Full Body tracking mode in UNITY app)** +```python +import xrobotoolkit_sdk as xrt + +xrt.init() + +# Check if body tracking data is available +if xrt.is_body_data_available(): + # Get body joint poses (24 joints, 7 values each: x,y,z,qx,qy,qz,qw) + body_poses = xrt.get_body_joints_pose() + print(f"Body joints pose data: {body_poses}") + + # Get body joint velocities (24 joints, 6 values each: vx,vy,vz,wx,wy,wz) + body_velocities = xrt.get_body_joints_velocity() + print(f"Body joints velocity data: {body_velocities}") + + # Get body joint accelerations (24 joints, 6 values each: ax,ay,az,wax,way,waz) + body_accelerations = xrt.get_body_joints_acceleration() + print(f"Body joints acceleration data: {body_accelerations}") + + # Get IMU timestamps for each joint + imu_timestamps = xrt.get_body_joints_timestamp() + print(f"IMU timestamps: {imu_timestamps}") + + # Get body data timestamp + body_timestamp = xrt.get_body_timestamp_ns() + print(f"Body data timestamp: {body_timestamp}") + + # Example: Get specific joint data (Head joint is index 15) + head_pose = body_poses[15] # Head joint + x, y, z, qx, qy, qz, qw = head_pose + print(f"Head pose: Position({x:.3f}, {y:.3f}, {z:.3f}) Rotation({qx:.3f}, {qy:.3f}, {qz:.3f}, {qw:.3f})") +else: + print("Body tracking data not available. Make sure:") + print("1. PICO headset is connected") + print("2. Body tracking is enabled in the control panel") + print("3. 
At least two Pico Swift devices are connected and calibrated") + +xrt.close() +``` + +**Body Joint Indices (similar to SMPL, 24 joints total):** +- 0: Pelvis, 1: Left Hip, 2: Right Hip, 3: Spine1, 4: Left Knee, 5: Right Knee +- 6: Spine2, 7: Left Ankle, 8: Right Ankle, 9: Spine3, 10: Left Foot, 11: Right Foot +- 12: Neck, 13: Left Collar, 14: Right Collar, 15: Head, 16: Left Shoulder, 17: Right Shoulder +- 18: Left Elbow, 19: Right Elbow, 20: Left Wrist, 21: Right Wrist, 22: Left Hand, 23: Right Hand diff --git a/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/bindings/py_bindings.cpp b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/bindings/py_bindings.cpp new file mode 100644 index 0000000..4f7c9d9 --- /dev/null +++ b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/bindings/py_bindings.cpp @@ -0,0 +1,546 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "PXREARobotSDK.h" + + +using json = nlohmann::json; + +std::array LeftControllerPose; +std::array RightControllerPose; +std::array HeadsetPose; + +std::array, 26> LeftHandTrackingState; +double LeftHandScale = 1.0; +int LeftHandIsActive = 0; +std::array, 26> RightHandTrackingState; +double RightHandScale = 1.0; +int RightHandIsActive = 0; + +// Whole body motion data - 24 joints for body tracking +std::array, 24> BodyJointsPose; // Position and rotation for each joint +std::array, 24> BodyJointsVelocity; // Velocity and angular velocity for each joint +std::array, 24> BodyJointsAcceleration; // Acceleration and angular acceleration for each joint +std::array BodyJointsTimestamp; // IMU timestamp for each joint +int64_t BodyTimeStampNs = 0; // Body data timestamp +bool BodyDataAvailable = false; // Flag to indicate if body data is available + +std::array, 3> MotionTrackerPose; // Position and rotation for each joint +std::array, 3> MotionTrackerVelocity; // Velocity and angular velocity for each joint 
+std::array, 3> MotionTrackerAcceleration; // Acceleration and angular acceleration for each joint +std::array MotionTrackerSerialNumbers; // Serial numbers of the motion trackers +int64_t MotionTimeStampNs = 0; // Motion data timestamp +int NumMotionDataAvailable = 0; // number of motion trackers + + +bool LeftMenuButton; +double LeftTrigger; +double LeftGrip; +std::array LeftAxis{0.0, 0.0}; +bool LeftAxisClick; +bool LeftPrimaryButton; +bool LeftSecondaryButton; + +bool RightMenuButton; +double RightTrigger; +double RightGrip; +std::array RightAxis{0.0, 0.0}; +bool RightAxisClick; +bool RightPrimaryButton; +bool RightSecondaryButton; + +int64_t TimeStampNs; + +std::mutex leftMutex; +std::mutex rightMutex; +std::mutex headsetPoseMutex; +std::mutex timestampMutex; +std::mutex leftHandMutex; +std::mutex rightHandMutex; +std::mutex bodyMutex; // Mutex for body tracking data +std::mutex motionMutex; + + + +std::array stringToPoseArray(const std::string& poseStr) { + std::array result{0}; + std::stringstream ss(poseStr); + std::string value; + int i = 0; + while (std::getline(ss, value, ',') && i < 7) { + result[i++] = std::stod(value); + } + return result; +} + +std::array stringToVelocityArray(const std::string& velocityStr) { + std::array result{0}; + std::stringstream ss(velocityStr); + std::string value; + int i = 0; + while (std::getline(ss, value, ',') && i < 6) { + result[i++] = std::stod(value); + } + return result; +} + +void OnPXREAClientCallback(void* context, PXREAClientCallbackType type, int status, void* userData) +{ + switch (type) + { + case PXREAServerConnect: + std::cout << "server connect\n" << std::endl; + break; + case PXREAServerDisconnect: + std::cout << "server disconnect\n" << std::endl; + break; + case PXREADeviceFind: + std::cout << "device found\n" << (const char*)userData << std::endl; + break; + case PXREADeviceMissing: + std::cout << "device missing\n" << (const char*)userData << std::endl; + break; + case PXREADeviceConnect: + std::cout 
<< "device connect\n" << (const char*)userData << status << std::endl; + break; + case PXREADeviceStateJson: + auto& dsj = *((PXREADevStateJson*)userData); + + + try { + json data = json::parse(dsj.stateJson); + if (data.contains("value")) { + auto value = json::parse(data["value"].get()); + if (value["Controller"].contains("left")) { + auto& left = value["Controller"]["left"]; + { + std::lock_guard lock(leftMutex); + LeftControllerPose = stringToPoseArray(left["pose"].get()); + LeftTrigger = left["trigger"].get(); + LeftGrip = left["grip"].get(); + LeftMenuButton = left["menuButton"].get(); + LeftAxis[0] = left["axisX"].get(); + LeftAxis[1] = left["axisY"].get(); + LeftAxisClick = left["axisClick"].get(); + LeftPrimaryButton = left["primaryButton"].get(); + LeftSecondaryButton = left["secondaryButton"].get(); + } + } + if (value["Controller"].contains("right")) { + auto& right = value["Controller"]["right"]; + { + std::lock_guard lock(rightMutex); + RightControllerPose = stringToPoseArray(right["pose"].get()); + RightTrigger = right["trigger"].get(); + RightGrip = right["grip"].get(); + RightMenuButton = right["menuButton"].get(); + RightAxis[0] = right["axisX"].get(); + RightAxis[1] = right["axisY"].get(); + RightAxisClick = right["axisClick"].get(); + RightPrimaryButton = right["primaryButton"].get(); + RightSecondaryButton = right["secondaryButton"].get(); + } + } + if (value.contains("Head")) { + auto& headset = value["Head"]; + { + std::lock_guard lock(headsetPoseMutex); + HeadsetPose = stringToPoseArray(headset["pose"].get()); + } + } + if (value.contains("timeStampNs")) { + std::lock_guard lock(timestampMutex); + TimeStampNs = value["timeStampNs"].get(); + } + if (value["Hand"].contains("leftHand")) { + auto& leftHand = value["Hand"]["leftHand"]; + { + std::lock_guard lock(leftHandMutex); + + LeftHandScale = leftHand["scale"].get(); + LeftHandIsActive = leftHand["isActive"].get(); + for (int i = 0; i < 26; i++) { + LeftHandTrackingState[i] = 
stringToPoseArray(leftHand["HandJointLocations"][i]["p"].get()); + } + } + } + if (value["Hand"].contains("rightHand")) { + auto& rightHand = value["Hand"]["rightHand"]; + { + std::lock_guard lock(rightHandMutex); + RightHandScale = rightHand["scale"].get(); + RightHandIsActive = rightHand["isActive"].get(); + for (int i = 0; i < 26; i++) { + RightHandTrackingState[i] = stringToPoseArray(rightHand["HandJointLocations"][i]["p"].get()); + } + } + } + // Parse Body data for whole body motion capture + if (value.contains("Body")) { + auto& body = value["Body"]; + { + std::lock_guard lock(bodyMutex); + + if (body.contains("timeStampNs")) { + BodyTimeStampNs = body["timeStampNs"].get(); + } + + if (body.contains("joints") && body["joints"].is_array()) { + auto joints = body["joints"]; + int jointCount = std::min(static_cast(joints.size()), 24); + + for (int i = 0; i < jointCount; i++) { + auto& joint = joints[i]; + + // Parse pose (position and rotation) + if (joint.contains("p")) { + BodyJointsPose[i] = stringToPoseArray(joint["p"].get()); + } + + // Parse velocity and angular velocity + if (joint.contains("va")) { + BodyJointsVelocity[i] = stringToVelocityArray(joint["va"].get()); + } + + // Parse acceleration and angular acceleration + if (joint.contains("wva")) { + BodyJointsAcceleration[i] = stringToVelocityArray(joint["wva"].get()); + } + + // Parse IMU timestamp + if (joint.contains("t")) { + BodyJointsTimestamp[i] = joint["t"].get(); + } + } + + BodyDataAvailable = true; + } + } + } + //parse individual tracker data + if (value.contains("Motion")) { + auto& motion = value["Motion"]; + { + std::lock_guard lock(motionMutex); + if (motion.contains("timeStampNs")) { + MotionTimeStampNs = motion["timeStampNs"].get(); + } + if (motion.contains("joints") && motion["joints"].is_array()) { + auto joints = motion["joints"]; + NumMotionDataAvailable = std::min(static_cast(joints.size()), 3); + + for (int i = 0; i < NumMotionDataAvailable; i++) { + auto& joint = joints[i]; + 
+ // Parse pose (position and rotation) + if (joint.contains("p")) { + MotionTrackerPose[i] = stringToPoseArray(joint["p"].get()); + } + + // Parse velocity and angular velocity + if (joint.contains("va")) { + MotionTrackerVelocity[i] = stringToVelocityArray(joint["va"].get()); + } + + // Parse acceleration and angular acceleration + if (joint.contains("wva")) { + MotionTrackerAcceleration[i] = stringToVelocityArray(joint["wva"].get()); + } + + if (joint.contains("sn")) { + MotionTrackerSerialNumbers[i] = joint["sn"].get(); + } + } + + } + } + } + } + } catch (const json::exception& e) { + std::cerr << "JSON parsing error: " << e.what() << std::endl; + } + break; + } +} + +void init() { + if (PXREAInit(NULL, OnPXREAClientCallback, PXREAFullMask) != 0) { + throw std::runtime_error("PXREAInit failed"); + } +} + +void deinit() { + PXREADeinit(); +} + +std::array getLeftControllerPose() { + std::lock_guard lock(leftMutex); + return LeftControllerPose; +} + +std::array getRightControllerPose() { + std::lock_guard lock(rightMutex); + return RightControllerPose; +} + +std::array getHeadsetPose() { + std::lock_guard lock(headsetPoseMutex); + return HeadsetPose; +} + +double getLeftTrigger() { + std::lock_guard lock(leftMutex); + return LeftTrigger; +} + +double getLeftGrip() { + std::lock_guard lock(leftMutex); + return LeftGrip; +} + +double getRightTrigger() { + std::lock_guard lock(rightMutex); + return RightTrigger; +} + +double getRightGrip() { + std::lock_guard lock(rightMutex); + return RightGrip; +} + +bool getLeftMenuButton() { + std::lock_guard lock(leftMutex); + return LeftMenuButton; +} + +bool getRightMenuButton() { + std::lock_guard lock(rightMutex); + return RightMenuButton; +} + +bool getLeftAxisClick() { + std::lock_guard lock(leftMutex); + return LeftAxisClick; +} + +bool getRightAxisClick() { + std::lock_guard lock(rightMutex); + return RightAxisClick; +} + +std::array getLeftAxis() { + std::lock_guard lock(leftMutex); + return LeftAxis; +} + + 
+std::array getRightAxis() { + std::lock_guard lock(rightMutex); + return RightAxis; +} + +bool getLeftPrimaryButton() { + std::lock_guard lock(leftMutex); + return LeftPrimaryButton; +} + +bool getRightPrimaryButton() { + std::lock_guard lock(rightMutex); + return RightPrimaryButton; +} + +bool getLeftSecondaryButton() { + std::lock_guard lock(leftMutex); + return LeftSecondaryButton; +} + +bool getRightSecondaryButton() { + std::lock_guard lock(rightMutex); + return RightSecondaryButton; +} + +int64_t getTimeStampNs() { + std::lock_guard lock(timestampMutex); + return TimeStampNs; +} + +std::array, 26> getLeftHandTrackingState() { + std::lock_guard lock(leftHandMutex); + return LeftHandTrackingState; +} + +int getLeftHandScale() { + std::lock_guard lock(leftHandMutex); + return LeftHandScale; +} + +int getLeftHandIsActive() { + std::lock_guard lock(leftHandMutex); + return LeftHandIsActive; +} + +std::array, 26> getRightHandTrackingState() { + std::lock_guard lock(rightHandMutex); + return RightHandTrackingState; +} + +int getRightHandScale() { + std::lock_guard lock(rightHandMutex); + return RightHandScale; +} + +int getRightHandIsActive() { + std::lock_guard lock(rightHandMutex); + return RightHandIsActive; +} + +// Body tracking functions +bool isBodyDataAvailable() { + std::lock_guard lock(bodyMutex); + return BodyDataAvailable; +} + +std::array, 24> getBodyJointsPose() { + std::lock_guard lock(bodyMutex); + return BodyJointsPose; +} + +std::array, 24> getBodyJointsVelocity() { + std::lock_guard lock(bodyMutex); + return BodyJointsVelocity; +} + +std::array, 24> getBodyJointsAcceleration() { + std::lock_guard lock(bodyMutex); + return BodyJointsAcceleration; +} + +std::array getBodyJointsTimestamp() { + std::lock_guard lock(bodyMutex); + return BodyJointsTimestamp; +} + +int64_t getBodyTimeStampNs() { + std::lock_guard lock(bodyMutex); + return BodyTimeStampNs; +} + +int numMotionDataAvailable() { + std::lock_guard lock(motionMutex); + return 
NumMotionDataAvailable; +} + +std::vector> getMotionTrackerPose() { + std::lock_guard lock(motionMutex); + std::vector> result; + for (int i = 0; i < NumMotionDataAvailable; i++) { + result.push_back(MotionTrackerPose[i]); + } + return result; +} + +std::vector> getMotionTrackerVelocity() { + std::lock_guard lock(motionMutex); + std::vector> result; + for (int i = 0; i < NumMotionDataAvailable; i++) { + result.push_back(MotionTrackerVelocity[i]); + } + return result; +} + +std::vector> getMotionTrackerAcceleration() { + std::lock_guard lock(motionMutex); + std::vector> result; + for (int i = 0; i < NumMotionDataAvailable; i++) { + result.push_back(MotionTrackerAcceleration[i]); + } + return result; +} + +std::vector getMotionTrackerSerialNumbers() { + std::lock_guard lock(motionMutex); + std::vector result; + for (int i = 0; i < NumMotionDataAvailable; i++) { + result.push_back(MotionTrackerSerialNumbers[i]); + } + return result; +} + +int64_t getMotionTimeStampNs() { + std::lock_guard lock(motionMutex); + return MotionTimeStampNs; +} + +int DeviceControlJsonWrapper(const std::string& dev_id, const std::string& json_str) { + const int rc = PXREADeviceControlJson(dev_id.c_str(), json_str.c_str()); + if (rc != 0) { + throw std::runtime_error("device_control_json failed"); + } + return rc; // 0 +} + +int SendBytesToDeviceWrapper(const std::string& dev_id, pybind11::bytes blob) { + std::string s = blob; // copy Python bytes to std::string + const int rc = PXREASendBytesToDevice(dev_id.c_str(), s.data(), static_cast(s.size())); + if (rc != 0) { + throw std::runtime_error("send_bytes_to_device failed"); + } + return rc; // 0 +} + + +PYBIND11_MODULE(xrobotoolkit_sdk, m) { + m.def("init", &init, "Initialize the PXREARobot SDK."); + m.def("close", &deinit, "Deinitialize the PXREARobot SDK."); + m.def("get_left_controller_pose", &getLeftControllerPose, "Get the left controller pose."); + m.def("get_right_controller_pose", &getRightControllerPose, "Get the right controller 
pose."); + m.def("get_headset_pose", &getHeadsetPose, "Get the headset pose."); + m.def("get_left_trigger", &getLeftTrigger, "Get the left trigger value."); + m.def("get_left_grip", &getLeftGrip, "Get the left grip value."); + m.def("get_right_trigger", &getRightTrigger, "Get the right trigger value."); + m.def("get_right_grip", &getRightGrip, "Get the right grip value."); + m.def("get_left_menu_button", &getLeftMenuButton, "Get the left menu button state."); + m.def("get_right_menu_button", &getRightMenuButton, "Get the right menu button state."); + m.def("get_left_axis_click", &getLeftAxisClick, "Get the left axis click state."); + m.def("get_right_axis_click", &getRightAxisClick, "Get the right axis click state."); + m.def("get_left_axis", &getLeftAxis, "Get the left axis values (x, y)."); + m.def("get_right_axis", &getRightAxis, "Get the right axis values (x, y)."); + m.def("get_X_button", &getLeftPrimaryButton, "Get the left primary button state."); + m.def("get_A_button", &getRightPrimaryButton, "Get the right primary button state."); + m.def("get_Y_button", &getLeftSecondaryButton, "Get the left secondary button state."); + m.def("get_B_button", &getRightSecondaryButton, "Get the right secondary button state."); + m.def("get_time_stamp_ns", &getTimeStampNs, "Get the timestamp in nanoseconds."); + m.def("get_left_hand_tracking_state", &getLeftHandTrackingState, "Get the left hand state."); + m.def("get_right_hand_tracking_state", &getRightHandTrackingState, "Get the right hand state."); + m.def("get_left_hand_is_active", &getLeftHandIsActive, "Get the left hand tracking quality (0 = low, 1 = high)."); + m.def("get_right_hand_is_active", &getRightHandIsActive, "Get the right hand tracking quality (0 = low, 1 = high)."); + + // Body tracking functions + m.def("is_body_data_available", &isBodyDataAvailable, "Check if body tracking data is available."); + m.def("get_body_joints_pose", &getBodyJointsPose, "Get the body joints pose data (24 joints, 7 values each: 
x,y,z,qx,qy,qz,qw)."); + m.def("get_body_joints_velocity", &getBodyJointsVelocity, "Get the body joints velocity data (24 joints, 6 values each: vx,vy,vz,wx,wy,wz)."); + m.def("get_body_joints_acceleration", &getBodyJointsAcceleration, "Get the body joints acceleration data (24 joints, 6 values each: ax,ay,az,wax,way,waz)."); + m.def("get_body_joints_timestamp", &getBodyJointsTimestamp, "Get the body joints IMU timestamp data (24 joints)."); + m.def("get_body_timestamp_ns", &getBodyTimeStampNs, "Get the body data timestamp in nanoseconds."); + + // Motion tracker functions + m.def("num_motion_data_available", &numMotionDataAvailable, "Check if motion tracker data is available."); + m.def("get_motion_tracker_pose", &getMotionTrackerPose, "Get the motion tracker pose data (3 trackers, 7 values each: x,y,z,qx,qy,qz,qw)."); + m.def("get_motion_tracker_velocity", &getMotionTrackerVelocity, "Get the motion tracker velocity data (3 trackers, 6 values each: vx,vy,vz,wx,wy,wz)."); + m.def("get_motion_tracker_acceleration", &getMotionTrackerAcceleration, "Get the motion tracker acceleration data (3 trackers, 6 values each: ax,ay,az,wax,way,waz)."); + m.def("get_motion_tracker_serial_numbers", &getMotionTrackerSerialNumbers, "Get the serial numbers of the motion trackers."); + m.def("get_motion_timestamp_ns", &getMotionTimeStampNs, "Get the motion data timestamp in nanoseconds."); + + + // send json bytes functions + m.def("device_control_json", &DeviceControlJsonWrapper, "Send a JSON control command to a device"); + m.def("send_bytes_to_device", &SendBytesToDeviceWrapper, "Send raw bytes to a device"); + + m.doc() = "Python bindings for PXREARobot SDK using pybind11."; +} \ No newline at end of file diff --git a/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/examples/example.py b/external_dependencies/XRoboToolkit-PC-Service-Pybind_X86_and_ARM64/examples/example.py new file mode 100644 index 0000000..52ddbdb --- /dev/null +++ 
# =========================================================================
# examples/example.py — basic XRoboToolkit SDK queries
# =========================================================================
# Demonstrates reading controller/headset poses, controller inputs
# (triggers, grips, buttons, axes), the device timestamp, and the raw
# hand-tracking state arrays.

import xrobotoolkit_sdk as xrt

xrt.init()

# 1. Controller and headset poses (7 values each: x, y, z, qx, qy, qz, qw).
left_pose = xrt.get_left_controller_pose()
right_pose = xrt.get_right_controller_pose()
headset_pose = xrt.get_headset_pose()

print(f"Left Controller Pose: {left_pose}")
print(f"Right Controller Pose: {right_pose}")
print(f"Headset Pose: {headset_pose}")

# 2. Controller inputs (triggers, grips, buttons, axes).

# Triggers and Grips
left_trigger = xrt.get_left_trigger()
right_grip = xrt.get_right_grip()
print(f"Left Trigger: {left_trigger}, Right Grip: {right_grip}")

# Buttons
a_button_pressed = xrt.get_A_button()
x_button_pressed = xrt.get_X_button()
print(f"A Button Pressed: {a_button_pressed}, X Button Pressed: {x_button_pressed}")

# Thumbstick axes and axis-click state
left_axis = xrt.get_left_axis()
right_axis_click = xrt.get_right_axis_click()
print(f"Left Axis: {left_axis}, Right Axis Clicked: {right_axis_click}")

# Device timestamp
timestamp = xrt.get_time_stamp_ns()
print(f"Current Timestamp (ns): {timestamp}")

# 3. Hand tracking state arrays
left_hand_tracking_state = xrt.get_left_hand_tracking_state()
print(f"Left Hand State: {left_hand_tracking_state}")
right_hand_tracking_state = xrt.get_right_hand_tracking_state()
print(f"Right Hand State: {right_hand_tracking_state}")


# =========================================================================
# examples/example_body_tracking.py — whole-body motion tracking demo
# =========================================================================
"""
Example script demonstrating whole body motion tracking with XRoboToolkit SDK.

This script shows how to:
1. Check if body tracking data is available
2. Get body joint poses (position and rotation)
3. Get body joint velocities and accelerations
4. Get IMU timestamps for each joint
5. Get the body data timestamp
6. Save data to a structured format (pickle)

Body Joint Indices (24 joints total):
0: Pelvis, 1: Left Hip, 2: Right Hip, 3: Spine1, 4: Left Knee, 5: Right Knee,
6: Spine2, 7: Left Ankle, 8: Right Ankle, 9: Spine3, 10: Left Foot, 11: Right Foot,
12: Neck, 13: Left Collar, 14: Right Collar, 15: Head, 16: Left Shoulder, 17: Right Shoulder,
18: Left Elbow, 19: Right Elbow, 20: Left Wrist, 21: Right Wrist, 22: Left Hand, 23: Right Hand
"""

import xrobotoolkit_sdk as xrt
import time
import argparse
import csv
import os
import json
import pickle
from datetime import datetime

# Joint names, index-aligned with the SDK's 24-joint body model (see above).
JOINT_NAMES = [
    "Pelvis", "Left_Hip", "Right_Hip", "Spine1", "Left_Knee", "Right_Knee",
    "Spine2", "Left_Ankle", "Right_Ankle", "Spine3", "Left_Foot", "Right_Foot",
    "Neck", "Left_Collar", "Right_Collar", "Head", "Left_Shoulder", "Right_Shoulder",
    "Left_Elbow", "Right_Elbow", "Left_Wrist", "Right_Wrist", "Left_Hand", "Right_Hand",
]

NUM_SAMPLES = 500          # number of snapshots to record
SAMPLE_PERIOD_S = 1 / 50   # target sampling period (~50 Hz)


def main():
    """Record NUM_SAMPLES body-pose snapshots at ~50 Hz and pickle them.

    Each snapshot maps joint name -> [position [x, y, z],
    scalar-first rotation [qw, qx, qy, qz]].
    Output file: body_tracking_data.pkl
    """
    xrt.init()

    # Block until the headset starts streaming body-tracking data.
    while not xrt.is_body_data_available():
        time.sleep(0.01)
    print("Body tracking data is available!")

    saved_data = []
    step_idx = 0
    while len(saved_data) < NUM_SAMPLES:
        if xrt.is_body_data_available():
            # Per joint: [x, y, z, qx, qy, qz, qw]
            body_poses = xrt.get_body_joints_pose()
            # Fetched for API demonstration only — not stored in the snapshot.
            body_velocities = xrt.get_body_joints_velocity()          # vx, vy, vz, wx, wy, wz
            body_accelerations = xrt.get_body_joints_acceleration()   # ax, ay, az, wax, way, waz
            imu_timestamps = xrt.get_body_joints_timestamp()          # per-joint IMU timestamps
            body_timestamp = xrt.get_body_timestamp_ns()              # frame timestamp in ns

            body_pose_dict = {}
            for i, joint_name in enumerate(JOINT_NAMES):
                pos = [body_poses[i][0], body_poses[i][1], body_poses[i][2]]
                # Reorder quaternion to scalar-first: [qw, qx, qy, qz].
                rot = [body_poses[i][6], body_poses[i][3], body_poses[i][4], body_poses[i][5]]
                body_pose_dict[joint_name] = [pos, rot]

            saved_data.append(body_pose_dict)
            step_idx += 1
            print(f"Step {step_idx} of {NUM_SAMPLES}")
        # BUGFIX: sleep on every pass. The original slept only when data was
        # available, so a data dropout turned this loop into a busy-wait
        # spinning at 100% CPU.
        time.sleep(SAMPLE_PERIOD_S)

    with open('body_tracking_data.pkl', 'wb') as f:
        pickle.dump(saved_data, f)


if __name__ == "__main__":
    main()


# =========================================================================
# examples/example_motion_tracker.py — standalone motion tracker demo
# =========================================================================
import xrobotoolkit_sdk as xrt

xrt.init()
num_motion_data = xrt.num_motion_data_available()
print(f"Number of Motion Trackers: {num_motion_data}")
if num_motion_data > 0:
    # Each tracker reports pose (x, y, z, qx, qy, qz, qw), velocity and
    # acceleration (linear + angular), plus its serial number.
    motion_tracker_pose = xrt.get_motion_tracker_pose()
    motion_tracker_velocity = xrt.get_motion_tracker_velocity()
    motion_tracker_acceleration = xrt.get_motion_tracker_acceleration()
    motion_tracker_serial_numbers = xrt.get_motion_tracker_serial_numbers()
    motion_timestamp_ns = xrt.get_motion_timestamp_ns()

    print(f"Motion Tracker Pose: {motion_tracker_pose}")
    print(f"Motion Tracker Velocity: {motion_tracker_velocity}")
    print(f"Motion Tracker Acceleration: {motion_tracker_acceleration}")
    print(f"Motion Tracker Serial Numbers: {motion_tracker_serial_numbers}")
    print(f"Motion Timestamp (ns): {motion_timestamp_ns}")
# =========================================================================
# examples/run_binding_continuous.py — continuous polling smoke test
# =========================================================================
"""Poll every Python binding repeatedly and print the raw values."""

import sys
import time

import xrobotoolkit_sdk as xrt

# BUGFIX: the banner used to claim "10 iterations" while the loop actually
# ran 100 times; both now derive from this single constant.
NUM_ITERATIONS = 100
POLL_PERIOD_S = 0.5  # delay between iterations


def run_tests():
    """Initialize the SDK, poll all bindings NUM_ITERATIONS times, then close.

    The SDK is always closed in the ``finally`` clause, even when a binding
    raises, so the service connection is never leaked.
    """
    print("Starting Python binding test...")

    try:
        print("Initializing SDK...")
        xrt.init()
        print("SDK Initialized successfully.")

        print(f"\n--- Testing all functions for {NUM_ITERATIONS} iterations ---")
        for i in range(NUM_ITERATIONS):
            print(f"\n--- Iteration {i+1} ---")

            # Poses (x, y, z, qx, qy, qz, qw)
            left_pose = xrt.get_left_controller_pose()
            right_pose = xrt.get_right_controller_pose()
            headset_pose = xrt.get_headset_pose()
            print(f"Left Controller Pose: {left_pose}")
            print(f"Right Controller Pose: {right_pose}")
            print(f"Headset Pose: {headset_pose}")

            # Triggers
            left_trigger = xrt.get_left_trigger()
            right_trigger = xrt.get_right_trigger()
            print(f"Left Trigger: {left_trigger}")
            print(f"Right Trigger: {right_trigger}")

            # Grips
            left_grip = xrt.get_left_grip()
            right_grip = xrt.get_right_grip()
            print(f"Left Grip: {left_grip}")
            print(f"Right Grip: {right_grip}")

            # Menu Buttons
            left_menu = xrt.get_left_menu_button()
            right_menu = xrt.get_right_menu_button()
            print(f"Left Menu Button: {left_menu}")
            print(f"Right Menu Button: {right_menu}")

            # Axis Clicks
            left_axis_click = xrt.get_left_axis_click()
            right_axis_click = xrt.get_right_axis_click()
            print(f"Left Axis Click: {left_axis_click}")
            print(f"Right Axis Click: {right_axis_click}")

            # Axes
            left_axis = xrt.get_left_axis()
            right_axis = xrt.get_right_axis()
            print(f"Left Axis (X, Y): {left_axis}")
            print(f"Right Axis (X, Y): {right_axis}")

            # Primary Buttons (X, A)
            x_button = xrt.get_X_button()  # Left Primary
            a_button = xrt.get_A_button()  # Right Primary
            print(f"X Button (Left Primary): {x_button}")
            print(f"A Button (Right Primary): {a_button}")

            # Secondary Buttons (Y, B)
            y_button = xrt.get_Y_button()  # Left Secondary
            b_button = xrt.get_B_button()  # Right Secondary
            print(f"Y Button (Left Secondary): {y_button}")
            print(f"B Button (Right Secondary): {b_button}")

            # Hand tracking states
            left_hand_state = xrt.get_left_hand_tracking_state()
            print(f"Left Hand State: {left_hand_state}")
            right_hand_state = xrt.get_right_hand_tracking_state()
            print(f"Right Hand State: {right_hand_state}")

            # Timestamp
            timestamp = xrt.get_time_stamp_ns()
            print(f"Timestamp (ns): {timestamp}")

            # Motion trackers (only reported when at least one is paired)
            num_motion_data = xrt.num_motion_data_available()
            print(f"Number of Motion Trackers: {num_motion_data}")
            if num_motion_data > 0:
                motion_tracker_pose = xrt.get_motion_tracker_pose()
                motion_tracker_velocity = xrt.get_motion_tracker_velocity()
                motion_tracker_acceleration = xrt.get_motion_tracker_acceleration()
                motion_tracker_serial_numbers = xrt.get_motion_tracker_serial_numbers()
                motion_timestamp_ns = xrt.get_motion_timestamp_ns()

                print(f"Motion Tracker Pose: {motion_tracker_pose}")
                print(f"Motion Tracker Velocity: {motion_tracker_velocity}")
                print(f"Motion Tracker Acceleration: {motion_tracker_acceleration}")
                print(f"Motion Tracker Serial Numbers: {motion_tracker_serial_numbers}")
                print(f"Motion Timestamp (ns): {motion_timestamp_ns}")

            time.sleep(POLL_PERIOD_S)  # pace the polling loop

        print("\nAll iterations complete.")

    except RuntimeError as e:
        print(f"Runtime Error: {e}", file=sys.stderr)
    except Exception as e:
        print(f"An unexpected error occurred: {e}", file=sys.stderr)
    finally:
        # Always release the SDK connection, even on error.
        print("\nClosing SDK...")
        xrt.close()
        print("SDK closed.")
        print("Test finished.")


if __name__ == "__main__":
    run_tests()
# =========================================================================
# examples/send_json_example.py — JSON / raw-byte commands to a device
# =========================================================================
import json, time
import xrobotoolkit_sdk as xrt
from datetime import datetime

xrt.init()  # establish the link, start the heartbeat and the server feedback stream

dev_id = "TestDevice"  # your device ID in the unity app

# Send a JSON control command three times and report rough latencies (ms).
for i in range(3):
    print("start sending")
    start = int(time.time() * 1e3)
    # BUGFIX: the "timestamp_ns" field was previously filled with
    # *milliseconds* (int(time.time() * 1e3)); time.time_ns() yields the
    # nanoseconds the field name promises.
    cmd = {"functionName": "set_robot", "value": {"mode": "teach"}, "timestamp_ns": time.time_ns()}
    jsonfile = json.dumps(cmd)
    jsontime = int(time.time() * 1e3)
    xrt.device_control_json(dev_id, jsonfile)
    end = int(time.time() * 1e3)

    print("json process time", jsontime - start)     # serialization cost (ms)

    print("current time difference ", end - start)   # total send latency (ms)

# Send raw bytes (custom-message path); prints the SDK return code.
print("send_bytes_to_device rc =", xrt.send_bytes_to_device(dev_id, b"\xAA\x55\x10\x00"))


# =========================================================================
# examples/test_hand_isactive.py — hand-tracking quality (isActive) probe
# =========================================================================
"""
Test script for hand tracking isActive functionality.

Demonstrates the isActive accessors, which report hand-tracking quality
for the left and right hands (0 = low quality, 1 = high quality).
"""

import xrobotoolkit_sdk as xrt
import time

def main():
    """Print per-hand isActive quality once per second until Ctrl+C."""
    try:
        # Initialize the SDK
        print("Initializing XRoboToolkit SDK...")
        xrt.init()

        print("Testing hand tracking isActive functionality...")
        print("isActive values: 0 = low quality, 1 = high quality")
        print("Press Ctrl+C to stop\n")

        while True:
            # Raw joint arrays (used below for the wrist-position readout).
            left_hand_state = xrt.get_left_hand_tracking_state()
            right_hand_state = xrt.get_right_hand_tracking_state()

            # Hand tracking quality flags (isActive).
            left_hand_active = xrt.get_left_hand_is_active()
            right_hand_active = xrt.get_right_hand_is_active()

            print(f"Left Hand: isActive={left_hand_active}")
            print(f"Right Hand: isActive={right_hand_active}")

            # Human-readable quality status.
            left_quality = "HIGH" if left_hand_active == 1 else "LOW"
            right_quality = "HIGH" if right_hand_active == 1 else "LOW"
            print(f"Left Hand Quality: {left_quality}")
            print(f"Right Hand Quality: {right_quality}")

            # First joint position for reference, when joint data is present.
            if len(left_hand_state) > 0:
                left_wrist_pos = left_hand_state[0][:3]  # x, y, z
                print(f"Left Wrist Position: ({left_wrist_pos[0]:.3f}, {left_wrist_pos[1]:.3f}, {left_wrist_pos[2]:.3f})")

            if len(right_hand_state) > 0:
                right_wrist_pos = right_hand_state[0][:3]  # x, y, z
                print(f"Right Wrist Position: ({right_wrist_pos[0]:.3f}, {right_wrist_pos[1]:.3f}, {right_wrist_pos[2]:.3f})")

            print("-" * 50)
            time.sleep(1)

    except KeyboardInterrupt:
        print("\nStopping hand tracking test...")
    except Exception as e:
        print(f"Error: {e}")
    finally:
        # Always release the SDK connection.
        print("Closing SDK...")
        xrt.close()

if __name__ == "__main__":
    main()
/**
 * @file PXREARobotSDK.h
 * @brief Robot SDK header file for client-side communication.
 *
 * NOTE(review): the patch adds two byte-identical copies of this header
 * (include/ and include/aarch64/); keep them in sync when editing.
 */
#ifndef PXREACLIENTSDK_H
#define PXREACLIENTSDK_H

#include <stdint.h> /* BUGFIX: uint64_t is used below but was never declared */

/* Export macro: dllexport/dllimport on Windows, default ELF visibility on
 * Linux (both build and consume sides need "default" when the library is
 * compiled with -fvisibility=hidden). */
#ifdef _WIN32
#if defined(PXREACLIENTSDK_LIBRARY)
# define PXREACLIENTSDK_EXPORT __declspec(dllexport)
#else
# define PXREACLIENTSDK_EXPORT __declspec(dllimport)
#endif
#endif

#ifdef __linux__
#if defined(PXREACLIENTSDK_LIBRARY)
# define PXREACLIENTSDK_EXPORT __attribute__((visibility("default")))
#else
# define PXREACLIENTSDK_EXPORT __attribute__((visibility("default")))
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

/** Event types delivered to the client callback (bit-flag values so they
 *  can be combined into the #PXREAInit mask). */
enum PXREAClientCallbackType
{
    /// @brief Server connected
    PXREAServerConnect = 1<<2,
    /// @brief Server disconnected
    PXREAServerDisconnect = 1<<3,
    /// @brief Device online
    PXREADeviceFind = 1<<4,
    /// @brief Device offline
    PXREADeviceMissing = 1<<5,
    /// @brief Device connected
    PXREADeviceConnect = 1<<9,
    /// @brief Device state in JSON format
    PXREADeviceStateJson = 1<<25,
    /// @brief Custom message
    PXREADeviceCustomMessage = 1<<26,
    /// @brief Mask for enabling all callbacks
    PXREAFullMask = 0xffffffff
};

/// @brief Device state in JSON format
typedef struct {
    /// @brief Device serial number
    char devID[32];
    /// @brief JSON string containing device state information
    char stateJson[16352];
} PXREADevStateJson;

/// @brief Custom (raw byte) message received from a device
typedef struct {
    /// @brief Device serial number
    char devID[32];
    /// @brief Data size in bytes
    uint64_t dataSize;
    /// @brief Data pointer, valid within callback only — copy before returning
    const char* dataPtr;
} PXREADevCustomMessage;

/**
 * @brief Client callback for receiving server messages
 * @param context Callback context, passed from #PXREAInit parameter 1 context
 * @param type Callback type
 * @param status Callback status code
 * @param userData Callback data pointer, payload determined by parameter 2 type
 */
typedef void(*pfPXREAClientCallback)(void* context,PXREAClientCallbackType type,int status,void* userData);

/**
 * @brief SDK initialization interface
 * @details Connect to service and register callback
 * @param context Callback context for passing user-defined data to callback function
 * @param cliCallback Callback function pointer for listening to server messages
 * @param mask Callback mask (combination of #PXREAClientCallbackType bits) for filtering server messages
 */
PXREACLIENTSDK_EXPORT int PXREAInit(void* context,pfPXREAClientCallback cliCallback,unsigned mask);

/**
 * @brief Termination interface
 * @details Disconnect from service
 */
PXREACLIENTSDK_EXPORT int PXREADeinit();

/**
 * @brief Send JSON format command to device
 * @param devID Device serial number
 * @param parameterJson Function and parameters in JSON format, refer to robot SDK documentation for specific usage
 * @return 0 Success
 * @return -1 Failure
 */
PXREACLIENTSDK_EXPORT int PXREADeviceControlJson(const char *devID,const char *parameterJson);

/**
 * @brief Send byte stream to specified device
 * @note This command is suitable for SDK caller's custom messages
 * @param devID Device serial number
 * @param data Starting address of byte stream
 * @param len Length of byte stream
 * @return 0 Success
 * @return -1 Failure
 */
PXREACLIENTSDK_EXPORT int PXREASendBytesToDevice(const char* devID,const char* data,unsigned len);

#ifdef __cplusplus
}
#endif

#endif // PXREACLIENTSDK_H
+/****************************************************************************\ + * Note on documentation: The source files contain links to the online * + * documentation of the public API at https://json.nlohmann.me. This URL * + * contains the most recent documentation and should also be applicable to * + * previous versions; documentation for deprecated functions is not * + * removed, but marked deprecated. See "Generate documentation" section in * + * file docs/README.md. * +\****************************************************************************/ + +#ifndef INCLUDE_NLOHMANN_JSON_HPP_ +#define INCLUDE_NLOHMANN_JSON_HPP_ + +#include // all_of, find, for_each +#include // nullptr_t, ptrdiff_t, size_t +#include // hash, less +#include // initializer_list +#ifndef JSON_NO_IO + #include // istream, ostream +#endif // JSON_NO_IO +#include // random_access_iterator_tag +#include // unique_ptr +#include // string, stoi, to_string +#include // declval, forward, move, pair, swap +#include // vector + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +// This file contains all macro definitions affecting or depending on the ABI + +#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK + #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH) + #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 3 + #warning "Already included a different version of the 
library!" + #endif + #endif +#endif + +#define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_MINOR 11 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_PATCH 3 // NOLINT(modernize-macro-to-enum) + +#ifndef JSON_DIAGNOSTICS + #define JSON_DIAGNOSTICS 0 +#endif + +#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON + #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0 +#endif + +#if JSON_DIAGNOSTICS + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag +#else + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS +#endif + +#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON + #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp +#else + #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON +#endif + +#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION + #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0 +#endif + +// Construct the namespace ABI tags component +#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b +#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \ + NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) + +#define NLOHMANN_JSON_ABI_TAGS \ + NLOHMANN_JSON_ABI_TAGS_CONCAT( \ + NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \ + NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON) + +// Construct the namespace version component +#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \ + _v ## major ## _ ## minor ## _ ## patch +#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \ + NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) + +#if NLOHMANN_JSON_NAMESPACE_NO_VERSION +#define NLOHMANN_JSON_NAMESPACE_VERSION +#else +#define NLOHMANN_JSON_NAMESPACE_VERSION \ + NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \ + NLOHMANN_JSON_VERSION_MINOR, \ + NLOHMANN_JSON_VERSION_PATCH) +#endif + +// Combine namespace components +#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b +#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \ + 
NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) + +#ifndef NLOHMANN_JSON_NAMESPACE +#define NLOHMANN_JSON_NAMESPACE \ + nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \ + NLOHMANN_JSON_ABI_TAGS, \ + NLOHMANN_JSON_NAMESPACE_VERSION) +#endif + +#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN +#define NLOHMANN_JSON_NAMESPACE_BEGIN \ + namespace nlohmann \ + { \ + inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \ + NLOHMANN_JSON_ABI_TAGS, \ + NLOHMANN_JSON_NAMESPACE_VERSION) \ + { +#endif + +#ifndef NLOHMANN_JSON_NAMESPACE_END +#define NLOHMANN_JSON_NAMESPACE_END \ + } /* namespace (inline namespace) NOLINT(readability/namespace) */ \ + } // namespace nlohmann +#endif + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // transform +#include // array +#include // forward_list +#include // inserter, front_inserter, end +#include // map +#include // string +#include // tuple, make_tuple +#include // is_arithmetic, is_same, is_enum, underlying_type, is_convertible +#include // unordered_map +#include // pair, declval +#include // valarray + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // nullptr_t +#include // exception +#if JSON_DIAGNOSTICS + #include // accumulate +#endif +#include // runtime_error +#include // to_string +#include // vector + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // 
array +#include // size_t +#include // uint8_t +#include // string + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // declval, pair +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +// #include + + +NLOHMANN_JSON_NAMESPACE_BEGIN +namespace detail +{ + +template struct make_void +{ + using type = void; +}; +template using void_t = typename make_void::type; + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + + +NLOHMANN_JSON_NAMESPACE_BEGIN +namespace detail +{ + +// https://en.cppreference.com/w/cpp/experimental/is_detected +struct nonesuch +{ + nonesuch() = delete; + ~nonesuch() = delete; + nonesuch(nonesuch const&) = delete; + nonesuch(nonesuch const&&) = delete; + void operator=(nonesuch const&) = delete; + void operator=(nonesuch&&) = delete; +}; + +template class Op, + class... Args> +struct detector +{ + using value_t = std::false_type; + using type = Default; +}; + +template class Op, class... Args> +struct detector>, Op, Args...> +{ + using value_t = std::true_type; + using type = Op; +}; + +template class Op, class... Args> +using is_detected = typename detector::value_t; + +template class Op, class... Args> +struct is_detected_lazy : is_detected { }; + +template class Op, class... 
Args> +using detected_t = typename detector::type; + +template class Op, class... Args> +using detected_or = detector; + +template class Op, class... Args> +using detected_or_t = typename detected_or::type; + +template class Op, class... Args> +using is_detected_exact = std::is_same>; + +template class Op, class... Args> +using is_detected_convertible = + std::is_convertible, To>; + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + +// #include + + +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2016-2021 Evan Nemerson +// SPDX-License-Identifier: MIT + +/* Hedley - https://nemequ.github.io/hedley + * Created by Evan Nemerson + */ + +#if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 15) +#if defined(JSON_HEDLEY_VERSION) + #undef JSON_HEDLEY_VERSION +#endif +#define JSON_HEDLEY_VERSION 15 + +#if defined(JSON_HEDLEY_STRINGIFY_EX) + #undef JSON_HEDLEY_STRINGIFY_EX +#endif +#define JSON_HEDLEY_STRINGIFY_EX(x) #x + +#if defined(JSON_HEDLEY_STRINGIFY) + #undef JSON_HEDLEY_STRINGIFY +#endif +#define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x) + +#if defined(JSON_HEDLEY_CONCAT_EX) + #undef JSON_HEDLEY_CONCAT_EX +#endif +#define JSON_HEDLEY_CONCAT_EX(a,b) a##b + +#if defined(JSON_HEDLEY_CONCAT) + #undef JSON_HEDLEY_CONCAT +#endif +#define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b) + +#if defined(JSON_HEDLEY_CONCAT3_EX) + #undef JSON_HEDLEY_CONCAT3_EX +#endif +#define JSON_HEDLEY_CONCAT3_EX(a,b,c) a##b##c + +#if defined(JSON_HEDLEY_CONCAT3) + #undef JSON_HEDLEY_CONCAT3 +#endif +#define JSON_HEDLEY_CONCAT3(a,b,c) JSON_HEDLEY_CONCAT3_EX(a,b,c) + +#if defined(JSON_HEDLEY_VERSION_ENCODE) + #undef JSON_HEDLEY_VERSION_ENCODE +#endif +#define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision)) + +#if 
defined(JSON_HEDLEY_VERSION_DECODE_MAJOR) + #undef JSON_HEDLEY_VERSION_DECODE_MAJOR +#endif +#define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000) + +#if defined(JSON_HEDLEY_VERSION_DECODE_MINOR) + #undef JSON_HEDLEY_VERSION_DECODE_MINOR +#endif +#define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000) + +#if defined(JSON_HEDLEY_VERSION_DECODE_REVISION) + #undef JSON_HEDLEY_VERSION_DECODE_REVISION +#endif +#define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000) + +#if defined(JSON_HEDLEY_GNUC_VERSION) + #undef JSON_HEDLEY_GNUC_VERSION +#endif +#if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__) + #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) +#elif defined(__GNUC__) + #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0) +#endif + +#if defined(JSON_HEDLEY_GNUC_VERSION_CHECK) + #undef JSON_HEDLEY_GNUC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_GNUC_VERSION) + #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_MSVC_VERSION) + #undef JSON_HEDLEY_MSVC_VERSION +#endif +#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000) && !defined(__ICL) + #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100) +#elif defined(_MSC_FULL_VER) && !defined(__ICL) + #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10) +#elif defined(_MSC_VER) && !defined(__ICL) + #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0) +#endif + +#if defined(JSON_HEDLEY_MSVC_VERSION_CHECK) + #undef 
JSON_HEDLEY_MSVC_VERSION_CHECK +#endif +#if !defined(JSON_HEDLEY_MSVC_VERSION) + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0) +#elif defined(_MSC_VER) && (_MSC_VER >= 1400) + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch))) +#elif defined(_MSC_VER) && (_MSC_VER >= 1200) + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch))) +#else + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor))) +#endif + +#if defined(JSON_HEDLEY_INTEL_VERSION) + #undef JSON_HEDLEY_INTEL_VERSION +#endif +#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && !defined(__ICL) + #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE) +#elif defined(__INTEL_COMPILER) && !defined(__ICL) + #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0) +#endif + +#if defined(JSON_HEDLEY_INTEL_VERSION_CHECK) + #undef JSON_HEDLEY_INTEL_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_INTEL_VERSION) + #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_INTEL_CL_VERSION) + #undef JSON_HEDLEY_INTEL_CL_VERSION +#endif +#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && defined(__ICL) + #define JSON_HEDLEY_INTEL_CL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER, __INTEL_COMPILER_UPDATE, 0) +#endif + +#if defined(JSON_HEDLEY_INTEL_CL_VERSION_CHECK) + #undef JSON_HEDLEY_INTEL_CL_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_INTEL_CL_VERSION) + #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_CL_VERSION >= 
JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_PGI_VERSION) + #undef JSON_HEDLEY_PGI_VERSION +#endif +#if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__) + #define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__) +#endif + +#if defined(JSON_HEDLEY_PGI_VERSION_CHECK) + #undef JSON_HEDLEY_PGI_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_PGI_VERSION) + #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_SUNPRO_VERSION) + #undef JSON_HEDLEY_SUNPRO_VERSION +#endif +#if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10) +#elif defined(__SUNPRO_C) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf) +#elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10) +#elif defined(__SUNPRO_CC) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf) +#endif + +#if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK) + #undef JSON_HEDLEY_SUNPRO_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_SUNPRO_VERSION) + #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= 
JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) + #undef JSON_HEDLEY_EMSCRIPTEN_VERSION +#endif +#if defined(__EMSCRIPTEN__) + #define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__) +#endif + +#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK) + #undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) + #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_ARM_VERSION) + #undef JSON_HEDLEY_ARM_VERSION +#endif +#if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION) + #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100) +#elif defined(__CC_ARM) && defined(__ARMCC_VERSION) + #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100) +#endif + +#if defined(JSON_HEDLEY_ARM_VERSION_CHECK) + #undef JSON_HEDLEY_ARM_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_ARM_VERSION) + #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_IBM_VERSION) + #undef JSON_HEDLEY_IBM_VERSION +#endif +#if defined(__ibmxl__) + #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__) +#elif defined(__xlC__) && defined(__xlC_ver__) + #define JSON_HEDLEY_IBM_VERSION 
JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff) +#elif defined(__xlC__) + #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0) +#endif + +#if defined(JSON_HEDLEY_IBM_VERSION_CHECK) + #undef JSON_HEDLEY_IBM_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_IBM_VERSION) + #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_VERSION) + #undef JSON_HEDLEY_TI_VERSION +#endif +#if \ + defined(__TI_COMPILER_VERSION__) && \ + ( \ + defined(__TMS470__) || defined(__TI_ARM__) || \ + defined(__MSP430__) || \ + defined(__TMS320C2000__) \ + ) +#if (__TI_COMPILER_VERSION__ >= 16000000) + #define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif +#endif + +#if defined(JSON_HEDLEY_TI_VERSION_CHECK) + #undef JSON_HEDLEY_TI_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_VERSION) + #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL2000_VERSION) + #undef JSON_HEDLEY_TI_CL2000_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__) + #define JSON_HEDLEY_TI_CL2000_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL2000_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL2000_VERSION) + #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL2000_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, 
minor, patch)) +#else + #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL430_VERSION) + #undef JSON_HEDLEY_TI_CL430_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__) + #define JSON_HEDLEY_TI_CL430_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL430_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL430_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL430_VERSION) + #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL430_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) + #undef JSON_HEDLEY_TI_ARMCL_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__)) + #define JSON_HEDLEY_TI_ARMCL_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK) + #undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) + #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_ARMCL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL6X_VERSION) + #undef JSON_HEDLEY_TI_CL6X_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__) + #define JSON_HEDLEY_TI_CL6X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL6X_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK +#endif +#if 
defined(JSON_HEDLEY_TI_CL6X_VERSION) + #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL6X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL7X_VERSION) + #undef JSON_HEDLEY_TI_CL7X_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__C7000__) + #define JSON_HEDLEY_TI_CL7X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL7X_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL7X_VERSION) + #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL7X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) + #undef JSON_HEDLEY_TI_CLPRU_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__PRU__) + #define JSON_HEDLEY_TI_CLPRU_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CLPRU_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) + #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CLPRU_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_CRAY_VERSION) + #undef JSON_HEDLEY_CRAY_VERSION +#endif +#if defined(_CRAYC) + #if defined(_RELEASE_PATCHLEVEL) + #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL) + #else + #define JSON_HEDLEY_CRAY_VERSION 
JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0) + #endif +#endif + +#if defined(JSON_HEDLEY_CRAY_VERSION_CHECK) + #undef JSON_HEDLEY_CRAY_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_CRAY_VERSION) + #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_IAR_VERSION) + #undef JSON_HEDLEY_IAR_VERSION +#endif +#if defined(__IAR_SYSTEMS_ICC__) + #if __VER__ > 1000 + #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000)) + #else + #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0) + #endif +#endif + +#if defined(JSON_HEDLEY_IAR_VERSION_CHECK) + #undef JSON_HEDLEY_IAR_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_IAR_VERSION) + #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TINYC_VERSION) + #undef JSON_HEDLEY_TINYC_VERSION +#endif +#if defined(__TINYC__) + #define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100) +#endif + +#if defined(JSON_HEDLEY_TINYC_VERSION_CHECK) + #undef JSON_HEDLEY_TINYC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TINYC_VERSION) + #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_DMC_VERSION) + #undef JSON_HEDLEY_DMC_VERSION +#endif +#if defined(__DMC__) + #define JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf) +#endif + +#if 
defined(JSON_HEDLEY_DMC_VERSION_CHECK) + #undef JSON_HEDLEY_DMC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_DMC_VERSION) + #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_COMPCERT_VERSION) + #undef JSON_HEDLEY_COMPCERT_VERSION +#endif +#if defined(__COMPCERT_VERSION__) + #define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100) +#endif + +#if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK) + #undef JSON_HEDLEY_COMPCERT_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_COMPCERT_VERSION) + #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_PELLES_VERSION) + #undef JSON_HEDLEY_PELLES_VERSION +#endif +#if defined(__POCC__) + #define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0) +#endif + +#if defined(JSON_HEDLEY_PELLES_VERSION_CHECK) + #undef JSON_HEDLEY_PELLES_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_PELLES_VERSION) + #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_MCST_LCC_VERSION) + #undef JSON_HEDLEY_MCST_LCC_VERSION +#endif +#if defined(__LCC__) && defined(__LCC_MINOR__) + #define JSON_HEDLEY_MCST_LCC_VERSION JSON_HEDLEY_VERSION_ENCODE(__LCC__ / 100, __LCC__ % 100, __LCC_MINOR__) +#endif + +#if defined(JSON_HEDLEY_MCST_LCC_VERSION_CHECK) + #undef JSON_HEDLEY_MCST_LCC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_MCST_LCC_VERSION) + 
#define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_MCST_LCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_GCC_VERSION) + #undef JSON_HEDLEY_GCC_VERSION +#endif +#if \ + defined(JSON_HEDLEY_GNUC_VERSION) && \ + !defined(__clang__) && \ + !defined(JSON_HEDLEY_INTEL_VERSION) && \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_ARM_VERSION) && \ + !defined(JSON_HEDLEY_CRAY_VERSION) && \ + !defined(JSON_HEDLEY_TI_VERSION) && \ + !defined(JSON_HEDLEY_TI_ARMCL_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL430_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL2000_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL6X_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL7X_VERSION) && \ + !defined(JSON_HEDLEY_TI_CLPRU_VERSION) && \ + !defined(__COMPCERT__) && \ + !defined(JSON_HEDLEY_MCST_LCC_VERSION) + #define JSON_HEDLEY_GCC_VERSION JSON_HEDLEY_GNUC_VERSION +#endif + +#if defined(JSON_HEDLEY_GCC_VERSION_CHECK) + #undef JSON_HEDLEY_GCC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_GCC_VERSION) + #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_HAS_ATTRIBUTE +#endif +#if \ + defined(__has_attribute) && \ + ( \ + (!defined(JSON_HEDLEY_IAR_VERSION) || JSON_HEDLEY_IAR_VERSION_CHECK(8,5,9)) \ + ) +# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute) +#else +# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE +#endif +#if defined(__has_attribute) + #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) +#else + #define 
JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE +#endif +#if defined(__has_attribute) + #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) +#else + #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE +#endif +#if \ + defined(__has_cpp_attribute) && \ + defined(__cplusplus) && \ + (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute) +#else + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0) +#endif + +#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS) + #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS +#endif +#if !defined(__cplusplus) || !defined(__has_cpp_attribute) + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) +#elif \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_IAR_VERSION) && \ + (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \ + (!defined(JSON_HEDLEY_MSVC_VERSION) || JSON_HEDLEY_MSVC_VERSION_CHECK(19,20,0)) + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute) +#else + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE +#endif +#if defined(__has_cpp_attribute) && defined(__cplusplus) + #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) +#else + #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE) + 
#undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE +#endif +#if defined(__has_cpp_attribute) && defined(__cplusplus) + #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) +#else + #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_BUILTIN) + #undef JSON_HEDLEY_HAS_BUILTIN +#endif +#if defined(__has_builtin) + #define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin) +#else + #define JSON_HEDLEY_HAS_BUILTIN(builtin) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN) + #undef JSON_HEDLEY_GNUC_HAS_BUILTIN +#endif +#if defined(__has_builtin) + #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) +#else + #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_BUILTIN) + #undef JSON_HEDLEY_GCC_HAS_BUILTIN +#endif +#if defined(__has_builtin) + #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) +#else + #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_FEATURE) + #undef JSON_HEDLEY_HAS_FEATURE +#endif +#if defined(__has_feature) + #define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature) +#else + #define JSON_HEDLEY_HAS_FEATURE(feature) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_FEATURE) + #undef JSON_HEDLEY_GNUC_HAS_FEATURE +#endif +#if defined(__has_feature) + #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) +#else + #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_FEATURE) + #undef JSON_HEDLEY_GCC_HAS_FEATURE +#endif +#if defined(__has_feature) + #define 
JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) +#else + #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_EXTENSION) + #undef JSON_HEDLEY_HAS_EXTENSION +#endif +#if defined(__has_extension) + #define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension) +#else + #define JSON_HEDLEY_HAS_EXTENSION(extension) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION) + #undef JSON_HEDLEY_GNUC_HAS_EXTENSION +#endif +#if defined(__has_extension) + #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) +#else + #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_EXTENSION) + #undef JSON_HEDLEY_GCC_HAS_EXTENSION +#endif +#if defined(__has_extension) + #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) +#else + #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE +#endif +#if defined(__has_declspec_attribute) + #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute) +#else + #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE +#endif +#if defined(__has_declspec_attribute) + #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) +#else + #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE 
+#endif +#if defined(__has_declspec_attribute) + #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) +#else + #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_WARNING) + #undef JSON_HEDLEY_HAS_WARNING +#endif +#if defined(__has_warning) + #define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning) +#else + #define JSON_HEDLEY_HAS_WARNING(warning) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_WARNING) + #undef JSON_HEDLEY_GNUC_HAS_WARNING +#endif +#if defined(__has_warning) + #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) +#else + #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_WARNING) + #undef JSON_HEDLEY_GCC_HAS_WARNING +#endif +#if defined(__has_warning) + #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) +#else + #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ + defined(__clang__) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \ + 
JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \ + (JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR)) + #define JSON_HEDLEY_PRAGMA(value) _Pragma(#value) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) + #define JSON_HEDLEY_PRAGMA(value) __pragma(value) +#else + #define JSON_HEDLEY_PRAGMA(value) +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH) + #undef JSON_HEDLEY_DIAGNOSTIC_PUSH +#endif +#if defined(JSON_HEDLEY_DIAGNOSTIC_POP) + #undef JSON_HEDLEY_DIAGNOSTIC_POP +#endif +#if defined(__clang__) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push)) + #define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop)) +#elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop") +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop") +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") +#else + #define 
JSON_HEDLEY_DIAGNOSTIC_PUSH + #define JSON_HEDLEY_DIAGNOSTIC_POP +#endif + +/* JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for + HEDLEY INTERNAL USE ONLY. API subject to change without notice. */ +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ +#endif +#if defined(__cplusplus) +# if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat") +# if JSON_HEDLEY_HAS_WARNING("-Wc++17-extensions") +# if JSON_HEDLEY_HAS_WARNING("-Wc++1z-extensions") +# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ + _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \ + _Pragma("clang diagnostic ignored \"-Wc++1z-extensions\"") \ + xpr \ + JSON_HEDLEY_DIAGNOSTIC_POP +# else +# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ + _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \ + xpr \ + JSON_HEDLEY_DIAGNOSTIC_POP +# endif +# else +# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ + xpr \ + JSON_HEDLEY_DIAGNOSTIC_POP +# endif +# endif +#endif +#if !defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x +#endif + +#if defined(JSON_HEDLEY_CONST_CAST) + #undef JSON_HEDLEY_CONST_CAST +#endif +#if defined(__cplusplus) +# define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast(expr)) +#elif \ + JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) +# define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \ + ((T) (expr)); \ + JSON_HEDLEY_DIAGNOSTIC_POP \ + })) +#else +# define 
JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr)) +#endif + +#if defined(JSON_HEDLEY_REINTERPRET_CAST) + #undef JSON_HEDLEY_REINTERPRET_CAST +#endif +#if defined(__cplusplus) + #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast(expr)) +#else + #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr)) +#endif + +#if defined(JSON_HEDLEY_STATIC_CAST) + #undef JSON_HEDLEY_STATIC_CAST +#endif +#if defined(__cplusplus) + #define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast(expr)) +#else + #define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr)) +#endif + +#if defined(JSON_HEDLEY_CPP_CAST) + #undef JSON_HEDLEY_CPP_CAST +#endif +#if defined(__cplusplus) +# if JSON_HEDLEY_HAS_WARNING("-Wold-style-cast") +# define JSON_HEDLEY_CPP_CAST(T, expr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \ + ((T) (expr)) \ + JSON_HEDLEY_DIAGNOSTIC_POP +# elif JSON_HEDLEY_IAR_VERSION_CHECK(8,3,0) +# define JSON_HEDLEY_CPP_CAST(T, expr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("diag_suppress=Pe137") \ + JSON_HEDLEY_DIAGNOSTIC_POP +# else +# define JSON_HEDLEY_CPP_CAST(T, expr) ((T) (expr)) +# endif +#else +# define JSON_HEDLEY_CPP_CAST(T, expr) (expr) +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)") +#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:1478 1786)) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1216,1444,1445") +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) + #define 
JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996)) +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215") +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)") +#else + #define 
JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)") +#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:161)) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068)) +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(16,9,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") +#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161") +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 161") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-attributes") + 
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)") +#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:1292)) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030)) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097,1098") +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)") +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097") +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wcast-qual") + #define 
JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunused-function") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("clang diagnostic ignored \"-Wunused-function\"") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("GCC diagnostic ignored \"-Wunused-function\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(1,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION __pragma(warning(disable:4505)) +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("diag_suppress 3142") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION +#endif + +#if defined(JSON_HEDLEY_DEPRECATED) + #undef JSON_HEDLEY_DEPRECATED +#endif +#if defined(JSON_HEDLEY_DEPRECATED_FOR) + #undef JSON_HEDLEY_DEPRECATED_FOR +#endif +#if \ + JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since)) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement)) +#elif \ + (JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \ + 
JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since))) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement))) +#elif defined(__cplusplus) && (__cplusplus >= 201402L) + #define JSON_HEDLEY_DEPRECATED(since) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]]) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]]) +#elif \ + JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) + #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__)) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__)) +#elif \ + 
JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated) +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated") + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated") +#else + #define JSON_HEDLEY_DEPRECATED(since) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) +#endif + +#if defined(JSON_HEDLEY_UNAVAILABLE) + #undef JSON_HEDLEY_UNAVAILABLE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(warning) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since))) +#else + #define JSON_HEDLEY_UNAVAILABLE(available_since) +#endif + +#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT) + #undef JSON_HEDLEY_WARN_UNUSED_RESULT +#endif +#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT_MSG) + #undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + 
JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) __attribute__((__warn_unused_result__)) +#elif (JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) >= 201907L) + #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard(msg)]]) +#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) + #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) +#elif defined(_Check_return_) /* SAL */ + #define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_ + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) _Check_return_ +#else + #define JSON_HEDLEY_WARN_UNUSED_RESULT + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) +#endif + +#if defined(JSON_HEDLEY_SENTINEL) + #undef JSON_HEDLEY_SENTINEL +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_SENTINEL(position) __attribute__((__sentinel__(position))) +#else + #define JSON_HEDLEY_SENTINEL(position) +#endif + +#if defined(JSON_HEDLEY_NO_RETURN) + #undef JSON_HEDLEY_NO_RETURN +#endif +#if JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_NO_RETURN __noreturn +#elif \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_NO_RETURN 
__attribute__((__noreturn__)) +#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L + #define JSON_HEDLEY_NO_RETURN _Noreturn +#elif defined(__cplusplus) && (__cplusplus >= 201103L) + #define JSON_HEDLEY_NO_RETURN JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[noreturn]]) +#elif \ + JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,2,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) + #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) + #define JSON_HEDLEY_NO_RETURN _Pragma("does_not_return") +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) +#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) + #define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;") +#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) + #define JSON_HEDLEY_NO_RETURN __attribute((noreturn)) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) + #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) +#else + #define JSON_HEDLEY_NO_RETURN +#endif + +#if 
defined(JSON_HEDLEY_NO_ESCAPE) + #undef JSON_HEDLEY_NO_ESCAPE +#endif +#if JSON_HEDLEY_HAS_ATTRIBUTE(noescape) + #define JSON_HEDLEY_NO_ESCAPE __attribute__((__noescape__)) +#else + #define JSON_HEDLEY_NO_ESCAPE +#endif + +#if defined(JSON_HEDLEY_UNREACHABLE) + #undef JSON_HEDLEY_UNREACHABLE +#endif +#if defined(JSON_HEDLEY_UNREACHABLE_RETURN) + #undef JSON_HEDLEY_UNREACHABLE_RETURN +#endif +#if defined(JSON_HEDLEY_ASSUME) + #undef JSON_HEDLEY_ASSUME +#endif +#if \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_ASSUME(expr) __assume(expr) +#elif JSON_HEDLEY_HAS_BUILTIN(__builtin_assume) + #define JSON_HEDLEY_ASSUME(expr) __builtin_assume(expr) +#elif \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) + #if defined(__cplusplus) + #define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr) + #else + #define JSON_HEDLEY_ASSUME(expr) _nassert(expr) + #endif +#endif +#if \ + (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(18,10,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(10,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable() +#elif defined(JSON_HEDLEY_ASSUME) + #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) +#endif +#if !defined(JSON_HEDLEY_ASSUME) + #if defined(JSON_HEDLEY_UNREACHABLE) + #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, ((expr) ? 
1 : (JSON_HEDLEY_UNREACHABLE(), 1))) + #else + #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, expr) + #endif +#endif +#if defined(JSON_HEDLEY_UNREACHABLE) + #if \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) + #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (JSON_HEDLEY_STATIC_CAST(void, JSON_HEDLEY_ASSUME(0)), (value)) + #else + #define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE() + #endif +#else + #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (value) +#endif +#if !defined(JSON_HEDLEY_UNREACHABLE) + #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) +#endif + +JSON_HEDLEY_DIAGNOSTIC_PUSH +#if JSON_HEDLEY_HAS_WARNING("-Wpedantic") + #pragma clang diagnostic ignored "-Wpedantic" +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") && defined(__cplusplus) + #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" +#endif +#if JSON_HEDLEY_GCC_HAS_WARNING("-Wvariadic-macros",4,0,0) + #if defined(__clang__) + #pragma clang diagnostic ignored "-Wvariadic-macros" + #elif defined(JSON_HEDLEY_GCC_VERSION) + #pragma GCC diagnostic ignored "-Wvariadic-macros" + #endif +#endif +#if defined(JSON_HEDLEY_NON_NULL) + #undef JSON_HEDLEY_NON_NULL +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) + #define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__))) +#else + #define JSON_HEDLEY_NON_NULL(...) 
+#endif +JSON_HEDLEY_DIAGNOSTIC_POP + +#if defined(JSON_HEDLEY_PRINTF_FORMAT) + #undef JSON_HEDLEY_PRINTF_FORMAT +#endif +#if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check))) +#elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check))) +#elif \ + JSON_HEDLEY_HAS_ATTRIBUTE(format) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check))) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(6,0,0) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check)) +#else + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) +#endif + +#if defined(JSON_HEDLEY_CONSTEXPR) + 
#undef JSON_HEDLEY_CONSTEXPR +#endif +#if defined(__cplusplus) + #if __cplusplus >= 201103L + #define JSON_HEDLEY_CONSTEXPR JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(constexpr) + #endif +#endif +#if !defined(JSON_HEDLEY_CONSTEXPR) + #define JSON_HEDLEY_CONSTEXPR +#endif + +#if defined(JSON_HEDLEY_PREDICT) + #undef JSON_HEDLEY_PREDICT +#endif +#if defined(JSON_HEDLEY_LIKELY) + #undef JSON_HEDLEY_LIKELY +#endif +#if defined(JSON_HEDLEY_UNLIKELY) + #undef JSON_HEDLEY_UNLIKELY +#endif +#if defined(JSON_HEDLEY_UNPREDICTABLE) + #undef JSON_HEDLEY_UNPREDICTABLE +#endif +#if JSON_HEDLEY_HAS_BUILTIN(__builtin_unpredictable) + #define JSON_HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr)) +#endif +#if \ + (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) && !defined(JSON_HEDLEY_PGI_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(9,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability)) +# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1 , (probability)) +# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0 , (probability)) +# define JSON_HEDLEY_LIKELY(expr) __builtin_expect (!!(expr), 1 ) +# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect (!!(expr), 0 ) +#elif \ + (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ + 
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,27) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PREDICT(expr, expected, probability) \ + (((probability) >= 0.9) ? __builtin_expect((expr), (expected)) : (JSON_HEDLEY_STATIC_CAST(void, expected), (expr))) +# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \ + (__extension__ ({ \ + double hedley_probability_ = (probability); \ + ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \ + })) +# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \ + (__extension__ ({ \ + double hedley_probability_ = (probability); \ + ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \ + })) +# define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) +# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) +#else +# define JSON_HEDLEY_PREDICT(expr, expected, probability) (JSON_HEDLEY_STATIC_CAST(void, expected), (expr)) +# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr)) +# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr)) +# define JSON_HEDLEY_LIKELY(expr) (!!(expr)) +# define JSON_HEDLEY_UNLIKELY(expr) (!!(expr)) +#endif +#if !defined(JSON_HEDLEY_UNPREDICTABLE) + #define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5) +#endif + +#if defined(JSON_HEDLEY_MALLOC) + #undef JSON_HEDLEY_MALLOC +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ + 
JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_MALLOC __attribute__((__malloc__)) +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) + #define JSON_HEDLEY_MALLOC _Pragma("returns_new_memory") +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_MALLOC __declspec(restrict) +#else + #define JSON_HEDLEY_MALLOC +#endif + +#if defined(JSON_HEDLEY_PURE) + #undef JSON_HEDLEY_PURE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(pure) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(2,96,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && 
defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PURE __attribute__((__pure__)) +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) +# define JSON_HEDLEY_PURE _Pragma("does_not_write_global_data") +#elif defined(__cplusplus) && \ + ( \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) \ + ) +# define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;") +#else +# define JSON_HEDLEY_PURE +#endif + +#if defined(JSON_HEDLEY_CONST) + #undef JSON_HEDLEY_CONST +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(const) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(2,5,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_CONST __attribute__((__const__)) +#elif \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) + #define 
JSON_HEDLEY_CONST _Pragma("no_side_effect") +#else + #define JSON_HEDLEY_CONST JSON_HEDLEY_PURE +#endif + +#if defined(JSON_HEDLEY_RESTRICT) + #undef JSON_HEDLEY_RESTRICT +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus) + #define JSON_HEDLEY_RESTRICT restrict +#elif \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,4) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ + defined(__clang__) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_RESTRICT __restrict +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus) + #define JSON_HEDLEY_RESTRICT _Restrict +#else + #define JSON_HEDLEY_RESTRICT +#endif + +#if defined(JSON_HEDLEY_INLINE) + #undef JSON_HEDLEY_INLINE +#endif +#if \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ + (defined(__cplusplus) && (__cplusplus >= 199711L)) + #define JSON_HEDLEY_INLINE inline +#elif \ + defined(JSON_HEDLEY_GCC_VERSION) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(6,2,0) + #define JSON_HEDLEY_INLINE __inline__ +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,1,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + 
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_INLINE __inline +#else + #define JSON_HEDLEY_INLINE +#endif + +#if defined(JSON_HEDLEY_ALWAYS_INLINE) + #undef JSON_HEDLEY_ALWAYS_INLINE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) +# define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) +# define JSON_HEDLEY_ALWAYS_INLINE __forceinline +#elif defined(__cplusplus) && \ + ( \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) \ + ) +# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;") +#elif 
JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) +# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced") +#else +# define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE +#endif + +#if defined(JSON_HEDLEY_NEVER_INLINE) + #undef JSON_HEDLEY_NEVER_INLINE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) + #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__)) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0) + #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline") +#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) + #define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never") +#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) + #define JSON_HEDLEY_NEVER_INLINE __attribute((noinline)) 
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) + #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) +#else + #define JSON_HEDLEY_NEVER_INLINE +#endif + +#if defined(JSON_HEDLEY_PRIVATE) + #undef JSON_HEDLEY_PRIVATE +#endif +#if defined(JSON_HEDLEY_PUBLIC) + #undef JSON_HEDLEY_PUBLIC +#endif +#if defined(JSON_HEDLEY_IMPORT) + #undef JSON_HEDLEY_IMPORT +#endif +#if defined(_WIN32) || defined(__CYGWIN__) +# define JSON_HEDLEY_PRIVATE +# define JSON_HEDLEY_PUBLIC __declspec(dllexport) +# define JSON_HEDLEY_IMPORT __declspec(dllimport) +#else +# if \ + JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ + ( \ + defined(__TI_EABI__) && \ + ( \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) \ + ) \ + ) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden"))) +# define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default"))) +# else +# define JSON_HEDLEY_PRIVATE +# define JSON_HEDLEY_PUBLIC +# endif +# define JSON_HEDLEY_IMPORT extern +#endif + +#if defined(JSON_HEDLEY_NO_THROW) + #undef JSON_HEDLEY_NO_THROW +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__)) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) + #define JSON_HEDLEY_NO_THROW __declspec(nothrow) +#else + #define JSON_HEDLEY_NO_THROW +#endif + +#if defined(JSON_HEDLEY_FALL_THROUGH) + #undef JSON_HEDLEY_FALL_THROUGH +#endif +#if \ + 
JSON_HEDLEY_HAS_ATTRIBUTE(fallthrough) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(7,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__)) +#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(clang,fallthrough) + #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[clang::fallthrough]]) +#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough) + #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[fallthrough]]) +#elif defined(__fallthrough) /* SAL */ + #define JSON_HEDLEY_FALL_THROUGH __fallthrough +#else + #define JSON_HEDLEY_FALL_THROUGH +#endif + +#if defined(JSON_HEDLEY_RETURNS_NON_NULL) + #undef JSON_HEDLEY_RETURNS_NON_NULL +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__)) +#elif defined(_Ret_notnull_) /* SAL */ + #define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_ +#else + #define JSON_HEDLEY_RETURNS_NON_NULL +#endif + +#if defined(JSON_HEDLEY_ARRAY_PARAM) + #undef JSON_HEDLEY_ARRAY_PARAM +#endif +#if \ + defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \ + !defined(__STDC_NO_VLA__) && \ + !defined(__cplusplus) && \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_TINYC_VERSION) + #define JSON_HEDLEY_ARRAY_PARAM(name) (name) +#else + #define JSON_HEDLEY_ARRAY_PARAM(name) +#endif + +#if defined(JSON_HEDLEY_IS_CONSTANT) + #undef JSON_HEDLEY_IS_CONSTANT +#endif +#if defined(JSON_HEDLEY_REQUIRE_CONSTEXPR) + #undef JSON_HEDLEY_REQUIRE_CONSTEXPR +#endif +/* JSON_HEDLEY_IS_CONSTEXPR_ is for + HEDLEY INTERNAL USE ONLY. API subject to change without notice. 
*/ +#if defined(JSON_HEDLEY_IS_CONSTEXPR_) + #undef JSON_HEDLEY_IS_CONSTEXPR_ +#endif +#if \ + JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr) +#endif +#if !defined(__cplusplus) +# if \ + JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,24) +#if defined(__INTPTR_TYPE__) + #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*) +#else + #include <stdint.h> + #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*) +#endif +# elif \ + ( \ + defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \ + !defined(JSON_HEDLEY_SUNPRO_VERSION) && \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_IAR_VERSION)) || \ + (JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,3,0) +#if defined(__INTPTR_TYPE__) + #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ?
(void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0) +#else + #include <stdint.h> + #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0) +#endif +# elif \ + defined(JSON_HEDLEY_GCC_VERSION) || \ + defined(JSON_HEDLEY_INTEL_VERSION) || \ + defined(JSON_HEDLEY_TINYC_VERSION) || \ + defined(JSON_HEDLEY_TI_ARMCL_VERSION) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(18,12,0) || \ + defined(JSON_HEDLEY_TI_CL2000_VERSION) || \ + defined(JSON_HEDLEY_TI_CL6X_VERSION) || \ + defined(JSON_HEDLEY_TI_CL7X_VERSION) || \ + defined(JSON_HEDLEY_TI_CLPRU_VERSION) || \ + defined(__clang__) +# define JSON_HEDLEY_IS_CONSTEXPR_(expr) ( \ + sizeof(void) != \ + sizeof(*( \ + 1 ? \ + ((void*) ((expr) * 0L) ) : \ +((struct { char v[sizeof(void) * 2]; } *) 1) \ + ) \ + ) \ + ) +# endif +#endif +#if defined(JSON_HEDLEY_IS_CONSTEXPR_) + #if !defined(JSON_HEDLEY_IS_CONSTANT) + #define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY_IS_CONSTEXPR_(expr) + #endif + #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY_IS_CONSTEXPR_(expr) ?
(expr) : (-1)) +#else + #if !defined(JSON_HEDLEY_IS_CONSTANT) + #define JSON_HEDLEY_IS_CONSTANT(expr) (0) + #endif + #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr) +#endif + +#if defined(JSON_HEDLEY_BEGIN_C_DECLS) + #undef JSON_HEDLEY_BEGIN_C_DECLS +#endif +#if defined(JSON_HEDLEY_END_C_DECLS) + #undef JSON_HEDLEY_END_C_DECLS +#endif +#if defined(JSON_HEDLEY_C_DECL) + #undef JSON_HEDLEY_C_DECL +#endif +#if defined(__cplusplus) + #define JSON_HEDLEY_BEGIN_C_DECLS extern "C" { + #define JSON_HEDLEY_END_C_DECLS } + #define JSON_HEDLEY_C_DECL extern "C" +#else + #define JSON_HEDLEY_BEGIN_C_DECLS + #define JSON_HEDLEY_END_C_DECLS + #define JSON_HEDLEY_C_DECL +#endif + +#if defined(JSON_HEDLEY_STATIC_ASSERT) + #undef JSON_HEDLEY_STATIC_ASSERT +#endif +#if \ + !defined(__cplusplus) && ( \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \ + (JSON_HEDLEY_HAS_FEATURE(c_static_assert) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(6,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + defined(_Static_assert) \ + ) +# define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message) +#elif \ + (defined(__cplusplus) && (__cplusplus >= 201103L)) || \ + JSON_HEDLEY_MSVC_VERSION_CHECK(16,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) +# define JSON_HEDLEY_STATIC_ASSERT(expr, message) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message)) +#else +# define JSON_HEDLEY_STATIC_ASSERT(expr, message) +#endif + +#if defined(JSON_HEDLEY_NULL) + #undef JSON_HEDLEY_NULL +#endif +#if defined(__cplusplus) + #if __cplusplus >= 201103L + #define JSON_HEDLEY_NULL JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(nullptr) + #elif defined(NULL) + #define JSON_HEDLEY_NULL NULL + #else + #define JSON_HEDLEY_NULL JSON_HEDLEY_STATIC_CAST(void*, 0) + #endif +#elif defined(NULL) + #define JSON_HEDLEY_NULL NULL +#else + #define JSON_HEDLEY_NULL ((void*) 0) +#endif + +#if 
defined(JSON_HEDLEY_MESSAGE) + #undef JSON_HEDLEY_MESSAGE +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") +# define JSON_HEDLEY_MESSAGE(msg) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ + JSON_HEDLEY_PRAGMA(message msg) \ + JSON_HEDLEY_DIAGNOSTIC_POP +#elif \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg) +#elif JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg) +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) +#else +# define JSON_HEDLEY_MESSAGE(msg) +#endif + +#if defined(JSON_HEDLEY_WARNING) + #undef JSON_HEDLEY_WARNING +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") +# define JSON_HEDLEY_WARNING(msg) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ + JSON_HEDLEY_PRAGMA(clang warning msg) \ + JSON_HEDLEY_DIAGNOSTIC_POP +#elif \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,8,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) +# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) +# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg)) +#else +# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg) +#endif + +#if defined(JSON_HEDLEY_REQUIRE) + #undef JSON_HEDLEY_REQUIRE +#endif +#if defined(JSON_HEDLEY_REQUIRE_MSG) + #undef JSON_HEDLEY_REQUIRE_MSG +#endif +#if JSON_HEDLEY_HAS_ATTRIBUTE(diagnose_if) +# if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat") +# define JSON_HEDLEY_REQUIRE(expr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ + 
__attribute__((diagnose_if(!(expr), #expr, "error"))) \ + JSON_HEDLEY_DIAGNOSTIC_POP +# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ + __attribute__((diagnose_if(!(expr), msg, "error"))) \ + JSON_HEDLEY_DIAGNOSTIC_POP +# else +# define JSON_HEDLEY_REQUIRE(expr) __attribute__((diagnose_if(!(expr), #expr, "error"))) +# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) __attribute__((diagnose_if(!(expr), msg, "error"))) +# endif +#else +# define JSON_HEDLEY_REQUIRE(expr) +# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) +#endif + +#if defined(JSON_HEDLEY_FLAGS) + #undef JSON_HEDLEY_FLAGS +#endif +#if JSON_HEDLEY_HAS_ATTRIBUTE(flag_enum) && (!defined(__cplusplus) || JSON_HEDLEY_HAS_WARNING("-Wbitfield-enum-conversion")) + #define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__)) +#else + #define JSON_HEDLEY_FLAGS +#endif + +#if defined(JSON_HEDLEY_FLAGS_CAST) + #undef JSON_HEDLEY_FLAGS_CAST +#endif +#if JSON_HEDLEY_INTEL_VERSION_CHECK(19,0,0) +# define JSON_HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("warning(disable:188)") \ + ((T) (expr)); \ + JSON_HEDLEY_DIAGNOSTIC_POP \ + })) +#else +# define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr) +#endif + +#if defined(JSON_HEDLEY_EMPTY_BASES) + #undef JSON_HEDLEY_EMPTY_BASES +#endif +#if \ + (JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,23918) && !JSON_HEDLEY_MSVC_VERSION_CHECK(20,0,0)) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_EMPTY_BASES __declspec(empty_bases) +#else + #define JSON_HEDLEY_EMPTY_BASES +#endif + +/* Remaining macros are deprecated. 
*/ + +#if defined(JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK) + #undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK +#endif +#if defined(__clang__) + #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0) +#else + #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_CLANG_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE +#endif +#define JSON_HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) + +#if defined(JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE +#endif +#define JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) + +#if defined(JSON_HEDLEY_CLANG_HAS_BUILTIN) + #undef JSON_HEDLEY_CLANG_HAS_BUILTIN +#endif +#define JSON_HEDLEY_CLANG_HAS_BUILTIN(builtin) JSON_HEDLEY_HAS_BUILTIN(builtin) + +#if defined(JSON_HEDLEY_CLANG_HAS_FEATURE) + #undef JSON_HEDLEY_CLANG_HAS_FEATURE +#endif +#define JSON_HEDLEY_CLANG_HAS_FEATURE(feature) JSON_HEDLEY_HAS_FEATURE(feature) + +#if defined(JSON_HEDLEY_CLANG_HAS_EXTENSION) + #undef JSON_HEDLEY_CLANG_HAS_EXTENSION +#endif +#define JSON_HEDLEY_CLANG_HAS_EXTENSION(extension) JSON_HEDLEY_HAS_EXTENSION(extension) + +#if defined(JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE +#endif +#define JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) + +#if defined(JSON_HEDLEY_CLANG_HAS_WARNING) + #undef JSON_HEDLEY_CLANG_HAS_WARNING +#endif +#define JSON_HEDLEY_CLANG_HAS_WARNING(warning) JSON_HEDLEY_HAS_WARNING(warning) + +#endif /* !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < X) */ + + +// This file contains all internal macro definitions (except those affecting ABI) +// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them + +// #include + + +// exclude unsupported compilers +#if 
!defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK) + #if defined(__clang__) + #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 + #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" + #endif + #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) + #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800 + #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" + #endif + #endif +#endif + +// C++ language standard detection +// if the user manually specified the used c++ version this is skipped +#if !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11) + #if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) + #define JSON_HAS_CPP_20 + #define JSON_HAS_CPP_17 + #define JSON_HAS_CPP_14 + #elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #define JSON_HAS_CPP_17 + #define JSON_HAS_CPP_14 + #elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) + #define JSON_HAS_CPP_14 + #endif + // the cpp 11 flag is always specified because it is the minimal required version + #define JSON_HAS_CPP_11 +#endif + +#ifdef __has_include + #if __has_include(<version>) + #include <version> + #endif +#endif + +#if !defined(JSON_HAS_FILESYSTEM) && !defined(JSON_HAS_EXPERIMENTAL_FILESYSTEM) + #ifdef JSON_HAS_CPP_17 + #if defined(__cpp_lib_filesystem) + #define JSON_HAS_FILESYSTEM 1 + #elif defined(__cpp_lib_experimental_filesystem) + #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1 + #elif !defined(__has_include) + #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1 + #elif __has_include(<filesystem>) + #define JSON_HAS_FILESYSTEM 1 + #elif __has_include(<experimental/filesystem>) + #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1 + #endif + + // std::filesystem does not work on MinGW GCC 8:
https://sourceforge.net/p/mingw-w64/bugs/737/ + #if defined(__MINGW32__) && defined(__GNUC__) && __GNUC__ == 8 + #undef JSON_HAS_FILESYSTEM + #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM + #endif + + // no filesystem support before GCC 8: https://en.cppreference.com/w/cpp/compiler_support + #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 8 + #undef JSON_HAS_FILESYSTEM + #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM + #endif + + // no filesystem support before Clang 7: https://en.cppreference.com/w/cpp/compiler_support + #if defined(__clang_major__) && __clang_major__ < 7 + #undef JSON_HAS_FILESYSTEM + #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM + #endif + + // no filesystem support before MSVC 19.14: https://en.cppreference.com/w/cpp/compiler_support + #if defined(_MSC_VER) && _MSC_VER < 1914 + #undef JSON_HAS_FILESYSTEM + #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM + #endif + + // no filesystem support before iOS 13 + #if defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 130000 + #undef JSON_HAS_FILESYSTEM + #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM + #endif + + // no filesystem support before macOS Catalina + #if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 101500 + #undef JSON_HAS_FILESYSTEM + #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM + #endif + #endif +#endif + +#ifndef JSON_HAS_EXPERIMENTAL_FILESYSTEM + #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 0 +#endif + +#ifndef JSON_HAS_FILESYSTEM + #define JSON_HAS_FILESYSTEM 0 +#endif + +#ifndef JSON_HAS_THREE_WAY_COMPARISON + #if defined(__cpp_impl_three_way_comparison) && __cpp_impl_three_way_comparison >= 201907L \ + && defined(__cpp_lib_three_way_comparison) && __cpp_lib_three_way_comparison >= 201907L + #define JSON_HAS_THREE_WAY_COMPARISON 1 + #else + #define JSON_HAS_THREE_WAY_COMPARISON 0 + #endif +#endif + +#ifndef JSON_HAS_RANGES + // ranges header shipping in GCC 11.1.0 (released 2021-04-27) has syntax error + #if defined(__GLIBCXX__) && __GLIBCXX__ == 20210427 
+ #define JSON_HAS_RANGES 0 + #elif defined(__cpp_lib_ranges) + #define JSON_HAS_RANGES 1 + #else + #define JSON_HAS_RANGES 0 + #endif +#endif + +#ifndef JSON_HAS_STATIC_RTTI + #if !defined(_HAS_STATIC_RTTI) || _HAS_STATIC_RTTI != 0 + #define JSON_HAS_STATIC_RTTI 1 + #else + #define JSON_HAS_STATIC_RTTI 0 + #endif +#endif + +#ifdef JSON_HAS_CPP_17 + #define JSON_INLINE_VARIABLE inline +#else + #define JSON_INLINE_VARIABLE +#endif + +#if JSON_HEDLEY_HAS_ATTRIBUTE(no_unique_address) + #define JSON_NO_UNIQUE_ADDRESS [[no_unique_address]] +#else + #define JSON_NO_UNIQUE_ADDRESS +#endif + +// disable documentation warnings on clang +#if defined(__clang__) + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wdocumentation" + #pragma clang diagnostic ignored "-Wdocumentation-unknown-command" +#endif + +// allow disabling exceptions +#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) + #define JSON_THROW(exception) throw exception + #define JSON_TRY try + #define JSON_CATCH(exception) catch(exception) + #define JSON_INTERNAL_CATCH(exception) catch(exception) +#else + #include + #define JSON_THROW(exception) std::abort() + #define JSON_TRY if(true) + #define JSON_CATCH(exception) if(false) + #define JSON_INTERNAL_CATCH(exception) if(false) +#endif + +// override exception macros +#if defined(JSON_THROW_USER) + #undef JSON_THROW + #define JSON_THROW JSON_THROW_USER +#endif +#if defined(JSON_TRY_USER) + #undef JSON_TRY + #define JSON_TRY JSON_TRY_USER +#endif +#if defined(JSON_CATCH_USER) + #undef JSON_CATCH + #define JSON_CATCH JSON_CATCH_USER + #undef JSON_INTERNAL_CATCH + #define JSON_INTERNAL_CATCH JSON_CATCH_USER +#endif +#if defined(JSON_INTERNAL_CATCH_USER) + #undef JSON_INTERNAL_CATCH + #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER +#endif + +// allow overriding assert +#if !defined(JSON_ASSERT) + #include // assert + #define JSON_ASSERT(x) assert(x) +#endif + +// allow to access some 
private functions (needed by the test suite) +#if defined(JSON_TESTS_PRIVATE) + #define JSON_PRIVATE_UNLESS_TESTED public +#else + #define JSON_PRIVATE_UNLESS_TESTED private +#endif + +/*! +@brief macro to briefly define a mapping between an enum and JSON +@def NLOHMANN_JSON_SERIALIZE_ENUM +@since version 3.4.0 +*/ +#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \ + template \ + inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ + { \ + static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + static const std::pair m[] = __VA_ARGS__; \ + auto it = std::find_if(std::begin(m), std::end(m), \ + [e](const std::pair& ej_pair) -> bool \ + { \ + return ej_pair.first == e; \ + }); \ + j = ((it != std::end(m)) ? it : std::begin(m))->second; \ + } \ + template \ + inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ + { \ + static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + static const std::pair m[] = __VA_ARGS__; \ + auto it = std::find_if(std::begin(m), std::end(m), \ + [&j](const std::pair& ej_pair) -> bool \ + { \ + return ej_pair.second == j; \ + }); \ + e = ((it != std::end(m)) ? it : std::begin(m))->first; \ + } + +// Ugly macros to avoid uglier copy-paste when specializing basic_json. They +// may be removed in the future once the class is split. 
+ +#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ + template class ObjectType, \ + template class ArrayType, \ + class StringType, class BooleanType, class NumberIntegerType, \ + class NumberUnsignedType, class NumberFloatType, \ + template class AllocatorType, \ + template class JSONSerializer, \ + class BinaryType, \ + class CustomBaseClass> + +#define NLOHMANN_BASIC_JSON_TPL \ + basic_json + +// Macros to simplify conversion from/to types + +#define NLOHMANN_JSON_EXPAND( x ) x +#define NLOHMANN_JSON_GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, NAME,...) NAME +#define NLOHMANN_JSON_PASTE(...) NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_GET_MACRO(__VA_ARGS__, \ + NLOHMANN_JSON_PASTE64, \ + NLOHMANN_JSON_PASTE63, \ + NLOHMANN_JSON_PASTE62, \ + NLOHMANN_JSON_PASTE61, \ + NLOHMANN_JSON_PASTE60, \ + NLOHMANN_JSON_PASTE59, \ + NLOHMANN_JSON_PASTE58, \ + NLOHMANN_JSON_PASTE57, \ + NLOHMANN_JSON_PASTE56, \ + NLOHMANN_JSON_PASTE55, \ + NLOHMANN_JSON_PASTE54, \ + NLOHMANN_JSON_PASTE53, \ + NLOHMANN_JSON_PASTE52, \ + NLOHMANN_JSON_PASTE51, \ + NLOHMANN_JSON_PASTE50, \ + NLOHMANN_JSON_PASTE49, \ + NLOHMANN_JSON_PASTE48, \ + NLOHMANN_JSON_PASTE47, \ + NLOHMANN_JSON_PASTE46, \ + NLOHMANN_JSON_PASTE45, \ + NLOHMANN_JSON_PASTE44, \ + NLOHMANN_JSON_PASTE43, \ + NLOHMANN_JSON_PASTE42, \ + NLOHMANN_JSON_PASTE41, \ + NLOHMANN_JSON_PASTE40, \ + NLOHMANN_JSON_PASTE39, \ + NLOHMANN_JSON_PASTE38, \ + NLOHMANN_JSON_PASTE37, \ + NLOHMANN_JSON_PASTE36, \ + NLOHMANN_JSON_PASTE35, \ + NLOHMANN_JSON_PASTE34, \ + NLOHMANN_JSON_PASTE33, \ + NLOHMANN_JSON_PASTE32, \ + NLOHMANN_JSON_PASTE31, \ + NLOHMANN_JSON_PASTE30, \ + NLOHMANN_JSON_PASTE29, \ + NLOHMANN_JSON_PASTE28, \ + NLOHMANN_JSON_PASTE27, \ + NLOHMANN_JSON_PASTE26, \ + 
NLOHMANN_JSON_PASTE25, \ + NLOHMANN_JSON_PASTE24, \ + NLOHMANN_JSON_PASTE23, \ + NLOHMANN_JSON_PASTE22, \ + NLOHMANN_JSON_PASTE21, \ + NLOHMANN_JSON_PASTE20, \ + NLOHMANN_JSON_PASTE19, \ + NLOHMANN_JSON_PASTE18, \ + NLOHMANN_JSON_PASTE17, \ + NLOHMANN_JSON_PASTE16, \ + NLOHMANN_JSON_PASTE15, \ + NLOHMANN_JSON_PASTE14, \ + NLOHMANN_JSON_PASTE13, \ + NLOHMANN_JSON_PASTE12, \ + NLOHMANN_JSON_PASTE11, \ + NLOHMANN_JSON_PASTE10, \ + NLOHMANN_JSON_PASTE9, \ + NLOHMANN_JSON_PASTE8, \ + NLOHMANN_JSON_PASTE7, \ + NLOHMANN_JSON_PASTE6, \ + NLOHMANN_JSON_PASTE5, \ + NLOHMANN_JSON_PASTE4, \ + NLOHMANN_JSON_PASTE3, \ + NLOHMANN_JSON_PASTE2, \ + NLOHMANN_JSON_PASTE1)(__VA_ARGS__)) +#define NLOHMANN_JSON_PASTE2(func, v1) func(v1) +#define NLOHMANN_JSON_PASTE3(func, v1, v2) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE2(func, v2) +#define NLOHMANN_JSON_PASTE4(func, v1, v2, v3) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE3(func, v2, v3) +#define NLOHMANN_JSON_PASTE5(func, v1, v2, v3, v4) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE4(func, v2, v3, v4) +#define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5) +#define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6) +#define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7) +#define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8) +#define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v6, v7, v8, v9) +#define NLOHMANN_JSON_PASTE11(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE10(func, v2, v3, v4, v5, v6, v7, v8, v9, v10) +#define 
NLOHMANN_JSON_PASTE12(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE11(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) +#define NLOHMANN_JSON_PASTE13(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE12(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) +#define NLOHMANN_JSON_PASTE14(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE13(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) +#define NLOHMANN_JSON_PASTE15(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE14(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) +#define NLOHMANN_JSON_PASTE16(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE15(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) +#define NLOHMANN_JSON_PASTE17(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE16(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) +#define NLOHMANN_JSON_PASTE18(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE17(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) +#define NLOHMANN_JSON_PASTE19(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE18(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) +#define NLOHMANN_JSON_PASTE20(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE19(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, 
v18, v19) +#define NLOHMANN_JSON_PASTE21(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE20(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) +#define NLOHMANN_JSON_PASTE22(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE21(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) +#define NLOHMANN_JSON_PASTE23(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE22(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) +#define NLOHMANN_JSON_PASTE24(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE23(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) +#define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) +#define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) +#define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) 
NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) +#define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) +#define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) +#define NLOHMANN_JSON_PASTE30(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE29(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) +#define NLOHMANN_JSON_PASTE31(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE30(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) +#define NLOHMANN_JSON_PASTE32(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE31(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) +#define 
NLOHMANN_JSON_PASTE33(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE32(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) +#define NLOHMANN_JSON_PASTE34(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE33(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) +#define NLOHMANN_JSON_PASTE35(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE34(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) +#define NLOHMANN_JSON_PASTE36(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE35(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) +#define NLOHMANN_JSON_PASTE37(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE36(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, 
v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) +#define NLOHMANN_JSON_PASTE38(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE37(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) +#define NLOHMANN_JSON_PASTE39(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE38(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) +#define NLOHMANN_JSON_PASTE40(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE39(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) +#define NLOHMANN_JSON_PASTE41(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE40(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) +#define NLOHMANN_JSON_PASTE42(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, 
v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE41(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) +#define NLOHMANN_JSON_PASTE43(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE42(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) +#define NLOHMANN_JSON_PASTE44(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE43(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) +#define NLOHMANN_JSON_PASTE45(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE44(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) +#define NLOHMANN_JSON_PASTE46(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, 
v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE45(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) +#define NLOHMANN_JSON_PASTE47(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE46(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) +#define NLOHMANN_JSON_PASTE48(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE47(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) +#define NLOHMANN_JSON_PASTE49(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE48(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, 
v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) +#define NLOHMANN_JSON_PASTE50(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE49(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) +#define NLOHMANN_JSON_PASTE51(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE50(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) +#define NLOHMANN_JSON_PASTE52(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE51(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) +#define NLOHMANN_JSON_PASTE53(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, 
v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE52(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) +#define NLOHMANN_JSON_PASTE54(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE53(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) +#define NLOHMANN_JSON_PASTE55(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE54(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) +#define NLOHMANN_JSON_PASTE56(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE55(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, 
v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) +#define NLOHMANN_JSON_PASTE57(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE56(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) +#define NLOHMANN_JSON_PASTE58(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE57(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) +#define NLOHMANN_JSON_PASTE59(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE58(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, 
v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) +#define NLOHMANN_JSON_PASTE60(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE59(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) +#define NLOHMANN_JSON_PASTE61(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE60(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) +#define NLOHMANN_JSON_PASTE62(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE61(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, 
v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) +#define NLOHMANN_JSON_PASTE63(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE62(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) +#define NLOHMANN_JSON_PASTE64(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE63(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) + +#define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1; +#define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1); +#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1); + +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_INTRUSIVE +@since version 3.9.0 +*/ +#define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) 
\ + friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +#define NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(Type, ...) \ + friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +#define NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ + friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE +@since version 3.9.0 +*/ +#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \ + inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ + inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + +#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, ...) 
\ + inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +// inspired from https://stackoverflow.com/a/26745591 +// allows to call any std function as if (e.g. with begin): +// using std::begin; begin(x); +// +// it allows using the detected idiom to retrieve the return type +// of such an expression +#define NLOHMANN_CAN_CALL_STD_FUNC_IMPL(std_name) \ + namespace detail { \ + using std::std_name; \ + \ + template \ + using result_of_##std_name = decltype(std_name(std::declval()...)); \ + } \ + \ + namespace detail2 { \ + struct std_name##_tag \ + { \ + }; \ + \ + template \ + std_name##_tag std_name(T&&...); \ + \ + template \ + using result_of_##std_name = decltype(std_name(std::declval()...)); \ + \ + template \ + struct would_call_std_##std_name \ + { \ + static constexpr auto const value = ::nlohmann::detail:: \ + is_detected_exact::value; \ + }; \ + } /* namespace detail2 */ \ + \ + template \ + struct would_call_std_##std_name : detail2::would_call_std_##std_name \ + { \ + } + +#ifndef JSON_USE_IMPLICIT_CONVERSIONS + #define JSON_USE_IMPLICIT_CONVERSIONS 1 +#endif + +#if JSON_USE_IMPLICIT_CONVERSIONS + #define JSON_EXPLICIT +#else + #define JSON_EXPLICIT explicit +#endif + +#ifndef JSON_DISABLE_ENUM_SERIALIZATION + #define JSON_DISABLE_ENUM_SERIALIZATION 0 +#endif + +#ifndef JSON_USE_GLOBAL_UDLS + #define JSON_USE_GLOBAL_UDLS 1 +#endif + +#if JSON_HAS_THREE_WAY_COMPARISON + #include // partial_ordering +#endif + +NLOHMANN_JSON_NAMESPACE_BEGIN +namespace detail +{ + +/////////////////////////// +// JSON type enumeration // +/////////////////////////// + +/*! 
+@brief the JSON type enumeration + +This enumeration collects the different JSON types. It is internally used to +distinguish the stored values, and the functions @ref basic_json::is_null(), +@ref basic_json::is_object(), @ref basic_json::is_array(), +@ref basic_json::is_string(), @ref basic_json::is_boolean(), +@ref basic_json::is_number() (with @ref basic_json::is_number_integer(), +@ref basic_json::is_number_unsigned(), and @ref basic_json::is_number_float()), +@ref basic_json::is_discarded(), @ref basic_json::is_primitive(), and +@ref basic_json::is_structured() rely on it. + +@note There are three enumeration entries (number_integer, number_unsigned, and +number_float), because the library distinguishes these three types for numbers: +@ref basic_json::number_unsigned_t is used for unsigned integers, +@ref basic_json::number_integer_t is used for signed integers, and +@ref basic_json::number_float_t is used for floating-point numbers or to +approximate integers which do not fit in the limits of their respective type. + +@sa see @ref basic_json::basic_json(const value_t value_type) -- create a JSON +value with the default value for a given type + +@since version 1.0.0 +*/ +enum class value_t : std::uint8_t +{ + null, ///< null value + object, ///< object (unordered set of name/value pairs) + array, ///< array (ordered collection of values) + string, ///< string value + boolean, ///< boolean value + number_integer, ///< number value (signed integer) + number_unsigned, ///< number value (unsigned integer) + number_float, ///< number value (floating-point) + binary, ///< binary array (ordered collection of bytes) + discarded ///< discarded by the parser callback function +}; + +/*! 
+@brief comparison operator for JSON types
+
+Returns an ordering that is similar to Python:
+- order: null < boolean < number < object < array < string < binary
+- furthermore, each type is not smaller than itself
+- discarded values are not comparable
+- binary is represented as a b"" string in python and directly comparable to a
+  string; however, making a binary array directly comparable with a string would
+  be surprising behavior in a JSON file.
+
+@since version 1.0.0
+*/
+#if JSON_HAS_THREE_WAY_COMPARISON
+    inline std::partial_ordering operator<=>(const value_t lhs, const value_t rhs) noexcept // *NOPAD*
+#else
+    inline bool operator<(const value_t lhs, const value_t rhs) noexcept
+#endif
+{
+    static constexpr std::array<std::uint8_t, 9> order = {{
+            0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */,
+            1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */,
+            6 /* binary */
+        }
+    };
+
+    const auto l_index = static_cast<std::size_t>(lhs);
+    const auto r_index = static_cast<std::size_t>(rhs);
+#if JSON_HAS_THREE_WAY_COMPARISON
+    if (l_index < order.size() && r_index < order.size())
+    {
+        return order[l_index] <=> order[r_index]; // *NOPAD*
+    }
+    return std::partial_ordering::unordered;
+#else
+    return l_index < order.size() && r_index < order.size() && order[l_index] < order[r_index];
+#endif
+}
+
+// GCC selects the built-in operator< over an operator rewritten from
+// a user-defined spaceship operator
+// Clang, MSVC, and ICC select the rewritten candidate
+// (see GCC bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105200)
+#if JSON_HAS_THREE_WAY_COMPARISON && defined(__GNUC__)
+inline bool operator<(const value_t lhs, const value_t rhs) noexcept
+{
+    return std::is_lt(lhs <=> rhs); // *NOPAD*
+}
+#endif
+
+} // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
+
+// #include
+// __ _____ _____ _____
+// __| | __| | | | JSON for Modern C++
+// | | |__ | | | | | | version 3.11.3
+// |_____|_____|_____|_|___| https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 
2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+// #include
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+namespace detail
+{
+
+/*!
+@brief replace all occurrences of a substring by another string
+
+@param[in,out] s  the string to manipulate; changed so that all
+               occurrences of @a f are replaced with @a t
+@param[in]     f  the substring to replace with @a t
+@param[in]     t  the string to replace @a f
+
+@pre The search string @a f must not be empty. **This precondition is
+enforced with an assertion.**
+
+@since version 2.0.0
+*/
+template<typename StringType>
+inline void replace_substring(StringType& s, const StringType& f,
+                              const StringType& t)
+{
+    JSON_ASSERT(!f.empty());
+    for (auto pos = s.find(f);             // find first occurrence of f
+            pos != StringType::npos;       // make sure f was found
+            s.replace(pos, f.size(), t),   // replace with t, and
+            pos = s.find(f, pos + t.size())) // find next occurrence of f
+    {}
+}
+
+/*!
+ * @brief string escaping as described in RFC 6901 (Sect. 4)
+ * @param[in] s string to escape
+ * @return escaped string
+ *
+ * Note the order of escaping "~" to "~0" and "/" to "~1" is important.
+ */
+template<typename StringType>
+inline StringType escape(StringType s)
+{
+    replace_substring(s, StringType{"~"}, StringType{"~0"});
+    replace_substring(s, StringType{"/"}, StringType{"~1"});
+    return s;
+}
+
+/*!
+ * @brief string unescaping as described in RFC 6901 (Sect. 4)
+ * @param[in] s string to unescape
+ * @return unescaped string
+ *
+ * Note the order of escaping "~1" to "/" and "~0" to "~" is important. 
+ */ +template +static void unescape(StringType& s) +{ + replace_substring(s, StringType{"~1"}, StringType{"/"}); + replace_substring(s, StringType{"~0"}, StringType{"~"}); +} + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // size_t + +// #include + + +NLOHMANN_JSON_NAMESPACE_BEGIN +namespace detail +{ + +/// struct to capture the start position of the current token +struct position_t +{ + /// the total number of characters read + std::size_t chars_read_total = 0; + /// the number of characters read in the current line + std::size_t chars_read_current_line = 0; + /// the number of lines read + std::size_t lines_read = 0; + + /// conversion to size_t to preserve SAX interface + constexpr operator size_t() const + { + return chars_read_total; + } +}; + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + +// #include + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2018 The Abseil Authors +// SPDX-License-Identifier: MIT + + + +#include // array +#include // size_t +#include // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type +#include // index_sequence, make_index_sequence, index_sequence_for + +// #include + + +NLOHMANN_JSON_NAMESPACE_BEGIN +namespace detail +{ + +template +using uncvref_t = typename std::remove_cv::type>::type; + +#ifdef JSON_HAS_CPP_14 + +// the following utilities are natively available in C++14 +using std::enable_if_t; +using std::index_sequence; +using 
std::make_index_sequence; +using std::index_sequence_for; + +#else + +// alias templates to reduce boilerplate +template +using enable_if_t = typename std::enable_if::type; + +// The following code is taken from https://github.com/abseil/abseil-cpp/blob/10cb35e459f5ecca5b2ff107635da0bfa41011b4/absl/utility/utility.h +// which is part of Google Abseil (https://github.com/abseil/abseil-cpp), licensed under the Apache License 2.0. + +//// START OF CODE FROM GOOGLE ABSEIL + +// integer_sequence +// +// Class template representing a compile-time integer sequence. An instantiation +// of `integer_sequence` has a sequence of integers encoded in its +// type through its template arguments (which is a common need when +// working with C++11 variadic templates). `absl::integer_sequence` is designed +// to be a drop-in replacement for C++14's `std::integer_sequence`. +// +// Example: +// +// template< class T, T... Ints > +// void user_function(integer_sequence); +// +// int main() +// { +// // user_function's `T` will be deduced to `int` and `Ints...` +// // will be deduced to `0, 1, 2, 3, 4`. +// user_function(make_integer_sequence()); +// } +template +struct integer_sequence +{ + using value_type = T; + static constexpr std::size_t size() noexcept + { + return sizeof...(Ints); + } +}; + +// index_sequence +// +// A helper template for an `integer_sequence` of `size_t`, +// `absl::index_sequence` is designed to be a drop-in replacement for C++14's +// `std::index_sequence`. +template +using index_sequence = integer_sequence; + +namespace utility_internal +{ + +template +struct Extend; + +// Note that SeqSize == sizeof...(Ints). It's passed explicitly for efficiency. +template +struct Extend, SeqSize, 0> +{ + using type = integer_sequence < T, Ints..., (Ints + SeqSize)... >; +}; + +template +struct Extend, SeqSize, 1> +{ + using type = integer_sequence < T, Ints..., (Ints + SeqSize)..., 2 * SeqSize >; +}; + +// Recursion helper for 'make_integer_sequence'. 
+// 'Gen::type' is an alias for 'integer_sequence'. +template +struct Gen +{ + using type = + typename Extend < typename Gen < T, N / 2 >::type, N / 2, N % 2 >::type; +}; + +template +struct Gen +{ + using type = integer_sequence; +}; + +} // namespace utility_internal + +// Compile-time sequences of integers + +// make_integer_sequence +// +// This template alias is equivalent to +// `integer_sequence`, and is designed to be a drop-in +// replacement for C++14's `std::make_integer_sequence`. +template +using make_integer_sequence = typename utility_internal::Gen::type; + +// make_index_sequence +// +// This template alias is equivalent to `index_sequence<0, 1, ..., N-1>`, +// and is designed to be a drop-in replacement for C++14's +// `std::make_index_sequence`. +template +using make_index_sequence = make_integer_sequence; + +// index_sequence_for +// +// Converts a typename pack into an index sequence of the same length, and +// is designed to be a drop-in replacement for C++14's +// `std::index_sequence_for()` +template +using index_sequence_for = make_index_sequence; + +//// END OF CODE FROM GOOGLE ABSEIL + +#endif + +// dispatch utility (taken from ranges-v3) +template struct priority_tag : priority_tag < N - 1 > {}; +template<> struct priority_tag<0> {}; + +// taken from ranges-v3 +template +struct static_const +{ + static JSON_INLINE_VARIABLE constexpr T value{}; +}; + +#ifndef JSON_HAS_CPP_17 + template + constexpr T static_const::value; +#endif + +template +inline constexpr std::array make_array(Args&& ... 
args) +{ + return std::array {{static_cast(std::forward(args))...}}; +} + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // numeric_limits +#include // false_type, is_constructible, is_integral, is_same, true_type +#include // declval +#include // tuple +#include // char_traits + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.11.3 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // random_access_iterator_tag + +// #include + +// #include + +// #include + + +NLOHMANN_JSON_NAMESPACE_BEGIN +namespace detail +{ + +template +struct iterator_types {}; + +template +struct iterator_types < + It, + void_t> +{ + using difference_type = typename It::difference_type; + using value_type = typename It::value_type; + using pointer = typename It::pointer; + using reference = typename It::reference; + using iterator_category = typename It::iterator_category; +}; + +// This is required as some compilers implement std::iterator_traits in a way that +// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341. 
+template<typename T, typename = void>
+struct iterator_traits
+{
+};
+
+template<typename T>
+struct iterator_traits < T, enable_if_t < !std::is_pointer<T>::value >>
+    : iterator_types<T>
+{
+};
+
+template<typename T>
+struct iterator_traits<T*, enable_if_t<std::is_pointer<T*>::value>>
+{
+    using iterator_category = std::random_access_iterator_tag;
+    using value_type = T;
+    using difference_type = ptrdiff_t;
+    using pointer = T*;
+    using reference = T&;
+};
+
+} // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
+
+// #include
+
+// #include
+// __ _____ _____ _____
+// __| | __| | | | JSON for Modern C++
+// | | |__ | | | | | | version 3.11.3
+// |_____|_____|_____|_|___| https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+// #include
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+
+NLOHMANN_CAN_CALL_STD_FUNC_IMPL(begin);
+
+NLOHMANN_JSON_NAMESPACE_END
+
+// #include
+// __ _____ _____ _____
+// __| | __| | | | JSON for Modern C++
+// | | |__ | | | | | | version 3.11.3
+// |_____|_____|_____|_|___| https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+// #include
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+
+NLOHMANN_CAN_CALL_STD_FUNC_IMPL(end);
+
+NLOHMANN_JSON_NAMESPACE_END
+
+// #include
+
+// #include
+
+// #include
+// __ _____ _____ _____
+// __| | __| | | | JSON for Modern C++
+// | | |__ | | | | | | version 3.11.3
+// |_____|_____|_____|_|___| https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_
+    #define INCLUDE_NLOHMANN_JSON_FWD_HPP_
+
+    #include <cstdint> // int64_t, uint64_t
+    #include <map> // map
+    #include <memory> // allocator
+    #include <string> // string
+    #include <vector> // vector
+
+    // #include
+
+
+    /*!
+    @brief namespace for Niels Lohmann
+    @see https://github.com/nlohmann
+    @since version 1.0.0
+    */
+    NLOHMANN_JSON_NAMESPACE_BEGIN
+
+    /*! 
+ @brief default JSONSerializer template argument + + This serializer ignores the template arguments and uses ADL + ([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) + for serialization. + */ + template + struct adl_serializer; + + /// a class to store JSON values + /// @sa https://json.nlohmann.me/api/basic_json/ + template class ObjectType = + std::map, + template class ArrayType = std::vector, + class StringType = std::string, class BooleanType = bool, + class NumberIntegerType = std::int64_t, + class NumberUnsignedType = std::uint64_t, + class NumberFloatType = double, + template class AllocatorType = std::allocator, + template class JSONSerializer = + adl_serializer, + class BinaryType = std::vector, // cppcheck-suppress syntaxError + class CustomBaseClass = void> + class basic_json; + + /// @brief JSON Pointer defines a string syntax for identifying a specific value within a JSON document + /// @sa https://json.nlohmann.me/api/json_pointer/ + template + class json_pointer; + + /*! + @brief default specialization + @sa https://json.nlohmann.me/api/json/ + */ + using json = basic_json<>; + + /// @brief a minimal map-like container that preserves insertion order + /// @sa https://json.nlohmann.me/api/ordered_map/ + template + struct ordered_map; + + /// @brief specialization that maintains the insertion order of object keys + /// @sa https://json.nlohmann.me/api/ordered_json/ + using ordered_json = basic_json; + + NLOHMANN_JSON_NAMESPACE_END + +#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_ + + +NLOHMANN_JSON_NAMESPACE_BEGIN +/*! +@brief detail namespace with internal helper functions + +This namespace collects functions that should not be exposed, +implementations of some @ref basic_json methods, and meta-programming helpers. + +@since version 2.1.0 +*/ +namespace detail +{ + +///////////// +// helpers // +///////////// + +// Note to maintainers: +// +// Every trait in this file expects a non CV-qualified type. 
+// The only exceptions are in the 'aliases for detected' section +// (i.e. those of the form: decltype(T::member_function(std::declval()))) +// +// In this case, T has to be properly CV-qualified to constraint the function arguments +// (e.g. to_json(BasicJsonType&, const T&)) + +template struct is_basic_json : std::false_type {}; + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +struct is_basic_json : std::true_type {}; + +// used by exceptions create() member functions +// true_type for pointer to possibly cv-qualified basic_json or std::nullptr_t +// false_type otherwise +template +struct is_basic_json_context : + std::integral_constant < bool, + is_basic_json::type>::type>::value + || std::is_same::value > +{}; + +////////////////////// +// json_ref helpers // +////////////////////// + +template +class json_ref; + +template +struct is_json_ref : std::false_type {}; + +template +struct is_json_ref> : std::true_type {}; + +////////////////////////// +// aliases for detected // +////////////////////////// + +template +using mapped_type_t = typename T::mapped_type; + +template +using key_type_t = typename T::key_type; + +template +using value_type_t = typename T::value_type; + +template +using difference_type_t = typename T::difference_type; + +template +using pointer_t = typename T::pointer; + +template +using reference_t = typename T::reference; + +template +using iterator_category_t = typename T::iterator_category; + +template +using to_json_function = decltype(T::to_json(std::declval()...)); + +template +using from_json_function = decltype(T::from_json(std::declval()...)); + +template +using get_template_function = decltype(std::declval().template get()); + +// trait checking if JSONSerializer::from_json(json const&, udt&) exists +template +struct has_from_json : std::false_type {}; + +// trait checking if j.get is valid +// use this trait instead of std::is_constructible or std::is_convertible, +// both rely on, or make use of implicit conversions, and thus fail when T 
+// has several constructors/operator= (see https://github.com/nlohmann/json/issues/958) +template +struct is_getable +{ + static constexpr bool value = is_detected::value; +}; + +template +struct has_from_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> +{ + using serializer = typename BasicJsonType::template json_serializer; + + static constexpr bool value = + is_detected_exact::value; +}; + +// This trait checks if JSONSerializer::from_json(json const&) exists +// this overload is used for non-default-constructible user-defined-types +template +struct has_non_default_from_json : std::false_type {}; + +template +struct has_non_default_from_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> +{ + using serializer = typename BasicJsonType::template json_serializer; + + static constexpr bool value = + is_detected_exact::value; +}; + +// This trait checks if BasicJsonType::json_serializer::to_json exists +// Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion. 
+template +struct has_to_json : std::false_type {}; + +template +struct has_to_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> +{ + using serializer = typename BasicJsonType::template json_serializer; + + static constexpr bool value = + is_detected_exact::value; +}; + +template +using detect_key_compare = typename T::key_compare; + +template +struct has_key_compare : std::integral_constant::value> {}; + +// obtains the actual object key comparator +template +struct actual_object_comparator +{ + using object_t = typename BasicJsonType::object_t; + using object_comparator_t = typename BasicJsonType::default_object_comparator_t; + using type = typename std::conditional < has_key_compare::value, + typename object_t::key_compare, object_comparator_t>::type; +}; + +template +using actual_object_comparator_t = typename actual_object_comparator::type; + +///////////////// +// char_traits // +///////////////// + +// Primary template of char_traits calls std char_traits +template +struct char_traits : std::char_traits +{}; + +// Explicitly define char traits for unsigned char since it is not standard +template<> +struct char_traits : std::char_traits +{ + using char_type = unsigned char; + using int_type = uint64_t; + + // Redefine to_int_type function + static int_type to_int_type(char_type c) noexcept + { + return static_cast(c); + } + + static char_type to_char_type(int_type i) noexcept + { + return static_cast(i); + } + + static constexpr int_type eof() noexcept + { + return static_cast(EOF); + } +}; + +// Explicitly define char traits for signed char since it is not standard +template<> +struct char_traits : std::char_traits +{ + using char_type = signed char; + using int_type = uint64_t; + + // Redefine to_int_type function + static int_type to_int_type(char_type c) noexcept + { + return static_cast(c); + } + + static char_type to_char_type(int_type i) noexcept + { + return static_cast(i); + } + + static constexpr int_type eof() noexcept + { + return 
static_cast(EOF); + } +}; + +/////////////////// +// is_ functions // +/////////////////// + +// https://en.cppreference.com/w/cpp/types/conjunction +template struct conjunction : std::true_type { }; +template struct conjunction : B { }; +template +struct conjunction +: std::conditional(B::value), conjunction, B>::type {}; + +// https://en.cppreference.com/w/cpp/types/negation +template struct negation : std::integral_constant < bool, !B::value > { }; + +// Reimplementation of is_constructible and is_default_constructible, due to them being broken for +// std::pair and std::tuple until LWG 2367 fix (see https://cplusplus.github.io/LWG/lwg-defects.html#2367). +// This causes compile errors in e.g. clang 3.5 or gcc 4.9. +template +struct is_default_constructible : std::is_default_constructible {}; + +template +struct is_default_constructible> + : conjunction, is_default_constructible> {}; + +template +struct is_default_constructible> + : conjunction, is_default_constructible> {}; + +template +struct is_default_constructible> + : conjunction...> {}; + +template +struct is_default_constructible> + : conjunction...> {}; + +template +struct is_constructible : std::is_constructible {}; + +template +struct is_constructible> : is_default_constructible> {}; + +template +struct is_constructible> : is_default_constructible> {}; + +template +struct is_constructible> : is_default_constructible> {}; + +template +struct is_constructible> : is_default_constructible> {}; + +template +struct is_iterator_traits : std::false_type {}; + +template +struct is_iterator_traits> +{ + private: + using traits = iterator_traits; + + public: + static constexpr auto value = + is_detected::value && + is_detected::value && + is_detected::value && + is_detected::value && + is_detected::value; +}; + +template +struct is_range +{ + private: + using t_ref = typename std::add_lvalue_reference::type; + + using iterator = detected_t; + using sentinel = detected_t; + + // to be 100% correct, it should use 
https://en.cppreference.com/w/cpp/iterator/input_or_output_iterator + // and https://en.cppreference.com/w/cpp/iterator/sentinel_for + // but reimplementing these would be too much work, as a lot of other concepts are used underneath + static constexpr auto is_iterator_begin = + is_iterator_traits>::value; + + public: + static constexpr bool value = !std::is_same::value && !std::is_same::value && is_iterator_begin; +}; + +template +using iterator_t = enable_if_t::value, result_of_begin())>>; + +template +using range_value_t = value_type_t>>; + +// The following implementation of is_complete_type is taken from +// https://blogs.msdn.microsoft.com/vcblog/2015/12/02/partial-support-for-expression-sfinae-in-vs-2015-update-1/ +// and is written by Xiang Fan who agreed to using it in this library. + +template +struct is_complete_type : std::false_type {}; + +template +struct is_complete_type : std::true_type {}; + +template +struct is_compatible_object_type_impl : std::false_type {}; + +template +struct is_compatible_object_type_impl < + BasicJsonType, CompatibleObjectType, + enable_if_t < is_detected::value&& + is_detected::value >> +{ + using object_t = typename BasicJsonType::object_t; + + // macOS's is_constructible does not play well with nonesuch... 
+ static constexpr bool value = + is_constructible::value && + is_constructible::value; +}; + +template +struct is_compatible_object_type + : is_compatible_object_type_impl {}; + +template +struct is_constructible_object_type_impl : std::false_type {}; + +template +struct is_constructible_object_type_impl < + BasicJsonType, ConstructibleObjectType, + enable_if_t < is_detected::value&& + is_detected::value >> +{ + using object_t = typename BasicJsonType::object_t; + + static constexpr bool value = + (is_default_constructible::value && + (std::is_move_assignable::value || + std::is_copy_assignable::value) && + (is_constructible::value && + std::is_same < + typename object_t::mapped_type, + typename ConstructibleObjectType::mapped_type >::value)) || + (has_from_json::value || + has_non_default_from_json < + BasicJsonType, + typename ConstructibleObjectType::mapped_type >::value); +}; + +template +struct is_constructible_object_type + : is_constructible_object_type_impl {}; + +template +struct is_compatible_string_type +{ + static constexpr auto value = + is_constructible::value; +}; + +template +struct is_constructible_string_type +{ + // launder type through decltype() to fix compilation failure on ICPC +#ifdef __INTEL_COMPILER + using laundered_type = decltype(std::declval()); +#else + using laundered_type = ConstructibleStringType; +#endif + + static constexpr auto value = + conjunction < + is_constructible, + is_detected_exact>::value; +}; + +template +struct is_compatible_array_type_impl : std::false_type {}; + +template +struct is_compatible_array_type_impl < + BasicJsonType, CompatibleArrayType, + enable_if_t < + is_detected::value&& + is_iterator_traits>>::value&& +// special case for types like std::filesystem::path whose iterator's value_type are themselves +// c.f. 
https://github.com/nlohmann/json/pull/3073 + !std::is_same>::value >> +{ + static constexpr bool value = + is_constructible>::value; +}; + +template +struct is_compatible_array_type + : is_compatible_array_type_impl {}; + +template +struct is_constructible_array_type_impl : std::false_type {}; + +template +struct is_constructible_array_type_impl < + BasicJsonType, ConstructibleArrayType, + enable_if_t::value >> + : std::true_type {}; + +template +struct is_constructible_array_type_impl < + BasicJsonType, ConstructibleArrayType, + enable_if_t < !std::is_same::value&& + !is_compatible_string_type::value&& + is_default_constructible::value&& +(std::is_move_assignable::value || + std::is_copy_assignable::value)&& +is_detected::value&& +is_iterator_traits>>::value&& +is_detected::value&& +// special case for types like std::filesystem::path whose iterator's value_type are themselves +// c.f. https://github.com/nlohmann/json/pull/3073 +!std::is_same>::value&& + is_complete_type < + detected_t>::value >> +{ + using value_type = range_value_t; + + static constexpr bool value = + std::is_same::value || + has_from_json::value || + has_non_default_from_json < + BasicJsonType, + value_type >::value; +}; + +template +struct is_constructible_array_type + : is_constructible_array_type_impl {}; + +template +struct is_compatible_integer_type_impl : std::false_type {}; + +template +struct is_compatible_integer_type_impl < + RealIntegerType, CompatibleNumberIntegerType, + enable_if_t < std::is_integral::value&& + std::is_integral::value&& + !std::is_same::value >> +{ + // is there an assert somewhere on overflows? 
+ using RealLimits = std::numeric_limits; + using CompatibleLimits = std::numeric_limits; + + static constexpr auto value = + is_constructible::value && + CompatibleLimits::is_integer && + RealLimits::is_signed == CompatibleLimits::is_signed; +}; + +template +struct is_compatible_integer_type + : is_compatible_integer_type_impl {}; + +template +struct is_compatible_type_impl: std::false_type {}; + +template +struct is_compatible_type_impl < + BasicJsonType, CompatibleType, + enable_if_t::value >> +{ + static constexpr bool value = + has_to_json::value; +}; + +template +struct is_compatible_type + : is_compatible_type_impl {}; + +template +struct is_constructible_tuple : std::false_type {}; + +template +struct is_constructible_tuple> : conjunction...> {}; + +template +struct is_json_iterator_of : std::false_type {}; + +template +struct is_json_iterator_of : std::true_type {}; + +template +struct is_json_iterator_of : std::true_type +{}; + +// checks if a given type T is a template specialization of Primary +template