From ff9e53ebcb9d909d2282957212865d96ee9f1fd6 Mon Sep 17 00:00:00 2001
From: silencht
Date: Mon, 7 Jul 2025 16:11:57 +0800
Subject: [PATCH] [big release] Please refer README: Release Note
---
.gitmodules | 6 +
LICENSE | 7 +-
README.md | 455 ++--
README_ja-JP.md | 556 ++--
README_zh-CN.md | 431 ++--
requirements.txt | 20 +-
teleop/open_television/__init__.py | 4 -
teleop/open_television/_test_television.py | 86 -
teleop/open_television/_test_tv_wrapper.py | 95 -
teleop/open_television/setup.py | 48 -
teleop/open_television/television.py | 514 ----
teleop/open_television/tv_wrapper.py | 410 ---
teleop/robot_control/dex-retargeting | 1 +
.../dex_retargeting/CITATION.cff | 22 -
teleop/robot_control/dex_retargeting/LICENSE | 21 -
.../robot_control/dex_retargeting/__init__.py | 1 -
.../dex_retargeting/constants.py | 85 -
.../dex_retargeting/kinematics_adaptor.py | 102 -
.../dex_retargeting/optimizer.py | 516 ----
.../dex_retargeting/optimizer_utils.py | 17 -
.../dex_retargeting/retargeting_config.py | 255 --
.../dex_retargeting/robot_wrapper.py | 93 -
.../dex_retargeting/seq_retarget.py | 151 --
.../robot_control/dex_retargeting/yourdfpy.py | 2237 -----------------
teleop/robot_control/hand_retargeting.py | 2 +-
teleop/robot_control/robot_hand_unitree.py | 4 +-
teleop/teleop_hand_and_arm.py | 8 +-
teleop/televuer | 1 +
...e-unitree-opject-unitree_sim_isaaclab-.txt | 1 -
teleop/utils/sim_state_topic.py | 53 +-
30 files changed, 750 insertions(+), 5452 deletions(-)
create mode 100644 .gitmodules
delete mode 100644 teleop/open_television/__init__.py
delete mode 100644 teleop/open_television/_test_television.py
delete mode 100644 teleop/open_television/_test_tv_wrapper.py
delete mode 100644 teleop/open_television/setup.py
delete mode 100644 teleop/open_television/television.py
delete mode 100644 teleop/open_television/tv_wrapper.py
create mode 160000 teleop/robot_control/dex-retargeting
delete mode 100644 teleop/robot_control/dex_retargeting/CITATION.cff
delete mode 100644 teleop/robot_control/dex_retargeting/LICENSE
delete mode 100644 teleop/robot_control/dex_retargeting/__init__.py
delete mode 100644 teleop/robot_control/dex_retargeting/constants.py
delete mode 100644 teleop/robot_control/dex_retargeting/kinematics_adaptor.py
delete mode 100644 teleop/robot_control/dex_retargeting/optimizer.py
delete mode 100644 teleop/robot_control/dex_retargeting/optimizer_utils.py
delete mode 100644 teleop/robot_control/dex_retargeting/retargeting_config.py
delete mode 100644 teleop/robot_control/dex_retargeting/robot_wrapper.py
delete mode 100644 teleop/robot_control/dex_retargeting/seq_retarget.py
delete mode 100644 teleop/robot_control/dex_retargeting/yourdfpy.py
create mode 160000 teleop/televuer
delete mode 100644 teleop/utils/data/-home-unitree-Code-unitree-opject-unitree_sim_isaaclab-.txt
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..7e27969
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "teleop/televuer"]
+ path = teleop/televuer
+ url = https://github.com/silencht/televuer
+[submodule "teleop/robot_control/dex-retargeting"]
+ path = teleop/robot_control/dex-retargeting
+ url = https://github.com/silencht/dex-retargeting
diff --git a/LICENSE b/LICENSE
index b241a5c..1c01999 100644
--- a/LICENSE
+++ b/LICENSE
@@ -23,10 +23,7 @@ This code builds upon following open-source code-bases. Please visit the URLs to
5) https://github.com/casadi/casadi
6) https://github.com/meshcat-dev/meshcat-python
7) https://github.com/zeromq/pyzmq
-8) https://github.com/unitreerobotics/unitree_dds_wrapper
-9) https://github.com/tonyzhaozh/act
-10) https://github.com/facebookresearch/detr
-11) https://github.com/Dingry/BunnyVisionPro
-12) https://github.com/unitreerobotics/unitree_sdk2_python
+8) https://github.com/Dingry/BunnyVisionPro
+9) https://github.com/unitreerobotics/unitree_sdk2_python
------------------
diff --git a/README.md b/README.md
index 1999e03..0c985f9 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,14 @@
-
avp_teleoperate
-
Unitree Robotics
+
xr_teleoperate
+
+
+
English | 中文 | 日本語
+
# 📺 Video Demo
@@ -26,219 +29,254 @@
+# 🔖 Release Note
+
+1. **Upgraded the Vuer library** to support more XR device modes. The project has been renamed from **`avp_teleoperate`** to **`xr_teleoperate`** to better reflect its broader scope — now supporting not only Apple Vision Pro but also Meta Quest 3 (with controllers) and PICO 4 Ultra Enterprise (with controllers).
+2. **Modularized** parts of the codebase and introduced Git submodules (`git submodule`) for better structure and maintainability.
+3. Added **headless**, **motion**, and **simulation** modes. The startup parameter configuration has been improved for ease of use (see Section 2.2). The **simulation** mode enables environment validation and hardware failure diagnostics.
+
+4. Changed the default hand retarget algorithm from Vector to **DexPilot**, improving the precision and intuitiveness of fingertip pinching.
+
+5. Various other improvements and refinements.
# 0. 📖 Introduction
-This repository implements teleoperation of the **Unitree humanoid robot** using **XR Devices** ( such as Apple Vision Pro、 PICO 4 Ultra Enterprise or Meta Quest 3 ).
-Here are the currently supported robots,
+This repository implements **teleoperation** control of a **Unitree humanoid robot** using **XR (Extended Reality) devices** (such as Apple Vision Pro, PICO 4 Ultra Enterprise, or Meta Quest 3).
+
+Here are the required devices and wiring diagram,
+
+
+
+
+
+
+
+
+The currently supported devices in this repository:
+# 1. 📦 Installation
-Here are the required devices and wiring diagram,
-
-
-
-
-
-
-
-This is a network topology diagram, using the G1 robot as an example,
+We tested our code on Ubuntu 20.04 and Ubuntu 22.04, other operating systems may be configured differently. This document primarily describes the **default mode**.
-
-
-
-
-
-
-
-
-
-
-# 1. 📦 Prerequisites
+For more information, you can refer to [Official Documentation ](https://support.unitree.com/home/zh/Teleoperation) and [OpenTeleVision](https://github.com/OpenTeleVision/TeleVision).
-We tested our code on Ubuntu 20.04 and Ubuntu 22.04, other operating systems may be configured differently.
+## 1.1 📥 Basic
-For more information, you can refer to [Official Documentation ](https://support.unitree.com/home/zh/Teleoperation) and [OpenTeleVision](https://github.com/OpenTeleVision/TeleVision).
+```bash
+# Create a conda environment
+(base) unitree@Host:~$ conda create -n tv python=3.10 pinocchio=3.1.0 numpy=1.26.4 -c conda-forge
+(base) unitree@Host:~$ conda activate tv
+# Clone this repo
+(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/xr_teleoperate.git
+(tv) unitree@Host:~$ cd xr_teleoperate
+# Shallow clone submodule
+(tv) unitree@Host:~/xr_teleoperate$ git submodule update --init --depth 1
+# Install televuer submodule
+(tv) unitree@Host:~/xr_teleoperate$ cd teleop/televuer
+(tv) unitree@Host:~/xr_teleoperate/teleop/televuer$ pip install -e .
+# Generate the certificate files required for televuer submodule
+(tv) unitree@Host:~/xr_teleoperate/teleop/televuer$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout key.pem -out cert.pem
+# Install dex-retargeting submodule
+(tv) unitree@Host:~/xr_teleoperate/teleop/televuer$ cd ../robot_control/dex-retargeting/
+(tv) unitree@Host:~/xr_teleoperate/teleop/robot_control/dex-retargeting$ pip install -e .
+# Install additional dependencies required by this repo
+(tv) unitree@Host:~/xr_teleoperate/teleop/robot_control/dex-retargeting$ cd ../../../
+(tv) unitree@Host:~/xr_teleoperate$ pip install -r requirements.txt
+```
-## 1.1 🦾 inverse kinematics
+## 1.2 🕹️ unitree_sdk2_python
```bash
-unitree@Host:~$ conda create -n tv python=3.8
-unitree@Host:~$ conda activate tv
-# If you use `pip install`, Make sure pinocchio version is 3.1.0
-(tv) unitree@Host:~$ conda install pinocchio -c conda-forge
-(tv) unitree@Host:~$ pip install meshcat
-(tv) unitree@Host:~$ pip install casadi
+# Install unitree_sdk2_python library which handles communication with the robot
+(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/unitree_sdk2_python.git
+(tv) unitree@Host:~$ cd unitree_sdk2_python
+(tv) unitree@Host:~$ pip install -e .
```
-> p.s. All identifiers in front of the command are meant for prompting: **Which device and directory the command should be executed on**.
+> **Note 1**: The [unitree_dds_wrapper](https://github.com/unitreerobotics/unitree_dds_wrapper) in the original h1_2 branch was a temporary version. It has now been fully migrated to the official Python-based control and communication library: [unitree_sdk2_python](https://github.com/unitreerobotics/unitree_sdk2_python).
+>
+> **Note 2**: All identifiers in front of the command are meant for prompting: **Which device and directory the command should be executed on**.
>
-In the Ubuntu system's `~/.bashrc` file, the default configuration is: `PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '`
+> In the Ubuntu system's `~/.bashrc` file, the default configuration is: `PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '`
>
> Taking the command `(tv) unitree@Host:~$ pip install meshcat` as an example:
>
> - `(tv)` Indicates the shell is in the conda environment named `tv`.
->- `unitree@Host:~` Shows the user `\u` `unitree` is logged into the device `\h` `Host`, with the current working directory `\w` as `$HOME`.
+> - `unitree@Host:~` Shows the user `\u` `unitree` is logged into the device `\h` `Host`, with the current working directory `\w` as `$HOME`.
> - `$` shows the current shell is Bash (for non-root users).
> - `pip install meshcat` is the command `unitree` wants to execute on `Host`.
->
+>
> You can refer to [Harley Hahn's Guide to Unix and Linux](https://www.harley.com/unix-book/book/chapters/04.html#H) and [Conda User Guide](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) to learn more.
-## 1.2 🕹️ unitree_sdk2_python
-
-```bash
-# Install unitree_sdk2_python.
-(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/unitree_sdk2_python.git
-(tv) unitree@Host:~$ cd unitree_sdk2_python
-(tv) unitree@Host:~$ pip install -e .
-```
-
-> p.s. The [unitree_dds_wrapper](https://github.com/unitreerobotics/unitree_dds_wrapper) in the original h1_2 branch was a temporary version. It has now been fully migrated to the official Python-based control and communication library: [unitree_sdk2_python](https://github.com/unitreerobotics/unitree_sdk2_python).
+# 2. 💻 Simulation Deployment
+## 2.1 📥 Environment Setup
+First, install [unitree_sim_isaaclab](https://github.com/unitreerobotics/unitree_sim_isaaclab). Follow that repo’s README.
-# 2. ⚙️ Configuration
-
-## 2.1 📥 basic
+Then launch the simulation with a G1(29 DoF) and Dex3 hand configuration:
```bash
-(tv) unitree@Host:~$ cd ~
-(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/avp_teleoperate.git
-(tv) unitree@Host:~$ cd ~/avp_teleoperate
-(tv) unitree@Host:~$ pip install -r requirements.txt
+(base) unitree@Host:~$ conda activate unitree_sim_env
+(unitree_sim_env) unitree@Host:~$ cd ~/unitree_sim_isaaclab
+(unitree_sim_env) unitree@Host:~/unitree_sim_isaaclab$ python sim_main.py --device cpu --enable_cameras --task Isaac-PickPlace-Cylinder-G129-Dex3-Joint --enable_dex3_dds --robot_type g129
```
-## 2.2 🔌 Local streaming
+After simulation starts, click once in the window to activate it. The terminal will show: `controller started, start main loop...`
-**2.2.1 Apple Vision Pro**
+Here is the simulation GUI:
-Apple does not allow WebXR on non-https connections. To test the application locally, we need to create a self-signed certificate and install it on the client. You need a ubuntu machine and a router. Connect the Apple Vision Pro and the ubuntu **Host machine** to the same router.
+
-1. install mkcert: https://github.com/FiloSottile/mkcert
-2. check **Host machine** local ip address:
+## 2.2 🚀 Launch
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ ifconfig | grep inet
-```
+This program supports XR control of a physical robot or in simulation. Choose modes with command-line arguments:
-Suppose the local ip address of the **Host machine** is `192.168.123.2`
+- **Basic control parameters**
-> p.s. You can use `ifconfig` command to check your **Host machine** ip address.
+| ⚙️ Parameter | 📜 Description | 🔘 Options | 📌 Default |
+| :---------: | :-------------------------------------------: | :----------------------------------------------------------: | :-------: |
+| `--xr-mode` | Choose XR input mode | `hand` (**hand tracking**) `controller` (**controller tracking**) | `hand` |
+| `--arm` | Choose robot arm type (see 0. 📖 Introduction) | `G1_29` `G1_23` `H1_2` `H1` | `G1_29` |
+| `--ee` | Choose end-effector (see 0. 📖 Introduction) | `dex1` `dex3` `inspire1` | none |
-3. create certificate:
+- **Mode flags**
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ mkcert -install && mkcert -cert-file cert.pem -key-file key.pem 192.168.123.2 localhost 127.0.0.1
-```
+| ⚙️ Flag | 📜 Description |
+| :----------: | :----------------------------------------------------------: |
+| `--record` | **Enable data recording**: after pressing **r** to start, press **s** to start/stop saving an episode. Can repeat. |
+| `--motion` | **Enable motion control**: allows independent robot control during teleop. In hand mode, use the R3 remote for walking; in controller mode, use the controller joysticks for walking |
+| `--headless` | Run without GUI (for headless PC2 deployment) |
+| `--sim` | Enable **simulation mode** |
-place the generated `cert.pem` and `key.pem` files in `teleop`
+Assuming hand tracking with G1(29 DoF) + Dex3 in simulation with recording:
```bash
-(tv) unitree@Host:~/avp_teleoperate$ cp cert.pem key.pem ~/avp_teleoperate/teleop/
+(tv) unitree@Host:~$ cd ~/xr_teleoperate/teleop/
+(tv) unitree@Host:~/xr_teleoperate/teleop/$ python teleop_hand_and_arm.py --xr-mode=hand --arm=G1_29 --ee=dex3 --sim --record
+# Simplified (defaults apply):
+(tv) unitree@Host:~/xr_teleoperate/teleop/$ python teleop_hand_and_arm.py --ee=dex3 --sim --record
```
-4. open firewall on server:
+After the program starts, the terminal shows:
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ sudo ufw allow 8012
-```
+
-5. install ca-certificates on Apple Vision Pro:
+Next steps:
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ mkcert -CAROOT
-```
+1. Wear your XR headset (e.g. Apple Vision Pro, PICO4, etc.)
-Copy the `rootCA.pem` via AirDrop to Apple Vision Pro and install it.
+2. Connect to the corresponding Wi‑Fi
-Settings > General > About > Certificate Trust Settings. Under "Enable full trust for root certificates", turn on trust for the certificate.
+3. Open a browser (e.g. Safari or PICO Browser) and go to: `https://192.168.123.2:8012?ws=wss://192.168.123.2:8012`
-> In the new version of Vision OS 2, this step is different: After copying the certificate to the Apple Vision Pro device via AirDrop, a certificate-related information section will appear below the account bar in the top left corner of the Settings app. Tap it to enable trust for the certificate.
+ > **Note 1**: This IP must match your **Host** IP (check with `ifconfig`).
+ > **Note 2**: You may see a warning page. Click **Advanced**, then **Proceed to IP (unsafe)**.
-Settings > Apps > Safari > Advanced > Feature Flags > Enable WebXR Related Features.
+
+
+
+
+
-------
+4. In the Vuer web, click **Virtual Reality**. Allow all prompts to start the VR session.
-**2.2.2 PICO 4 Ultra Enterprise or Meta Quest 3**
+
-We have tried using hand tracking for teleoperation on the PICO 4 Ultra Enterprise and Meta-Quest 3.
+5. You’ll see the robot’s first-person view in the headset. The terminal prints connection info:
+
+ ```bash
+ websocket is connected. id:dbb8537d-a58c-4c57-b49d-cbb91bd25b90
+ default socket worker is up, adding clientEvents
+ Uplink task running. id:dbb8537d-a58c-4c57-b49d-cbb91bd25b90
+ ```
-The system specifications of PICO 4 Ultra Enterprise:
+6. Align your arm to the **robot’s initial pose** to avoid sudden movements at start:
-> System Version: 5.12.6.U; Android version number: 14; Software version number: c000_cf01_bv1.0.1_sv5.12.6_202412121344_sparrow_b4244_user; browser version: [4.0.28 beta version](https://github.com/vuer-ai/vuer/issues/45#issuecomment-2674918619)
+
-The system specifications of Meta-Quest 3:
+7. Press **r** in the terminal to begin teleoperation. You can now control the robot arm and dexterous hand.
-> System version: 49829370066100510; Version: 62.0.0.273.343.570372087; Runtime version: 62.0.0.269.341.570372063; OS version: SQ3A.220605.009.A1.
+8. During teleoperation, press **s** to start recording; press **s** again to stop and save. Repeatable process.
-For more configuration steps, please refer to the [issue](https://github.com/unitreerobotics/avp_teleoperate/issues/32).
+
-## 2.3 🔎 Unit Test
+> **Note 1**: Recorded data is stored in `xr_teleoperate/teleop/utils/data` by default, with usage instructions at this repo: [unitree_IL_lerobot](https://github.com/unitreerobotics/unitree_IL_lerobot/tree/main?tab=readme-ov-file#data-collection-and-conversion).
+> **Note 2**: Please pay attention to your disk space size during data recording.
-This step is to verify that the environment is installed correctly.
+## 2.3 🔚 Exit
-comming soon.
+Press **q** in the terminal (or “record image” window) to quit.
-# 3. 🚀 Usage
+# 3. 🤖 Physical Deployment
-Please read the [Official Documentation ](https://support.unitree.com/home/zh/Teleoperation) at least once before starting this program.
+Physical deployment steps are similar to simulation, with these key differences:
+## 3.1 🖼️ Image Service
-## 3.1 🖼️ Image Server
+In the simulation environment, the image service is automatically enabled. For physical deployment, you need to manually start the image service based on your specific camera hardware. The steps are as follows:
-Copy `image_server.py` in the `avp_teleoperate/teleop/image_server` directory to the **Development Computing Unit PC2** of Unitree Robot (G1/H1/H1_2/etc.), and execute the following command **in the PC2**:
+Copy `image_server.py` in the `xr_teleoperate/teleop/image_server` directory to the **Development Computing Unit PC2** of Unitree Robot (G1/H1/H1_2/etc.),
```bash
-# p.s.1 You can transfer image_server.py to PC2 via the scp command and then use ssh to remotely login to PC2 to execute it.
+# p.s. You can transfer image_server.py to PC2 via the scp command and then use ssh to remotely login to PC2 to execute it.
# Assuming the IP address of the development computing unit PC2 is 192.168.123.164, the transmission process is as follows:
# log in to PC2 via SSH and create the folder for the image server
(tv) unitree@Host:~$ ssh unitree@192.168.123.164 "mkdir -p ~/image_server"
# Copy the local image_server.py to the ~/image_server directory on PC2
-(tv) unitree@Host:~$ scp ~/avp_teleoperate/teleop/image_server/image_server.py unitree@192.168.123.164:~/image_server/
+(tv) unitree@Host:~$ scp ~/xr_teleoperate/teleop/image_server/image_server.py unitree@192.168.123.164:~/image_server/
+```
+and execute the following command **in the PC2**:
-# p.s.2 Currently, this image transmission program supports two methods for reading images: OpenCV and Realsense SDK. Please refer to the comments in the `ImageServer` class within `image_server.py` to configure your image transmission service according to your camera hardware.
+```bash
+# p.s. Currently, this image transmission program supports two methods for reading images: OpenCV and Realsense SDK. Please refer to the comments in the `ImageServer` class within `image_server.py` to configure your image transmission service according to your camera hardware.
# Now located in Unitree Robot PC2 terminal
unitree@PC2:~/image_server$ python image_server.py
# You can see the terminal output as follows:
@@ -250,12 +288,14 @@ unitree@PC2:~/image_server$ python image_server.py
After image service is started, you can use `image_client.py` **in the Host** terminal to test whether the communication is successful:
```bash
-(tv) unitree@Host:~/avp_teleoperate/teleop/image_server$ python image_client.py
+(tv) unitree@Host:~/xr_teleoperate/teleop/image_server$ python image_client.py
```
-## 3.2 ✋ Inspire hands Server (optional)
+## 3.2 ✋ Inspire Hand Service (optional)
-> Note: If the selected robot configuration does not use the Inspire dexterous hand (Gen1), please ignore this section.
+> **Note 1**: Skip this if your config does not use the Inspire hand.
+> **Note 2**: For the G1 robot with [Inspire DFX hand](https://support.unitree.com/home/zh/G1_developer/inspire_dfx_dexterous_hand), see [issue #46](https://github.com/unitreerobotics/xr_teleoperate/issues/46).
+> **Note 3**: For the [Inspire FTP hand](https://support.unitree.com/home/zh/G1_developer/inspire_ftp_dexterity_hand), see [issue #48](https://github.com/unitreerobotics/xr_teleoperate/issues/48).
You can refer to [Dexterous Hand Development](https://support.unitree.com/home/zh/H1_developer/Dexterous_hand) to configure related environments and compile control programs. First, use [this URL](https://oss-global-cdn.unitree.com/static/0a8335f7498548d28412c31ea047d4be.zip) to download the dexterous hand control interface program. Copy it to **PC2** of Unitree robots.
@@ -275,143 +315,99 @@ unitree@PC2:~/h1_inspire_service/build$ ./h1_hand_example
If two hands open and close continuously, it indicates success. Once successful, close the `./h1_hand_example` program in Terminal 2.
-## 3.3 🚀 Start
+## 3.3 🚀 Launch
> 
>
> 1. Everyone must keep a safe distance from the robot to prevent any potential danger!
->
> 2. Please make sure to read the [Official Documentation](https://support.unitree.com/home/zh/Teleoperation) at least once before running this program.
->
-> 3. Always make sure that the robot has entered [debug mode (L2+R2)](https://support.unitree.com/home/zh/H1_developer/Remote_control) to stop the motion control program, this will avoid potential command conflict problems.
->
-
-It's best to have two operators to run this program, referred to as **Operator A** and **Operator B**.
+> 3. Without `--motion`, always make sure that the robot has entered [debug mode (L2+R2)](https://support.unitree.com/home/zh/H1_developer/Remote_control) to stop the motion control program, this will avoid potential command conflict problems.
+> 4. To use motion mode (with `--motion`), ensure the robot is in control mode (via [R3 remote](https://www.unitree.com/R3)).
+> 5. In motion mode:
+> - Right controller **A** = Exit teleop
+> - Both joysticks pressed = soft emergency stop (switch to damping mode)
+> - Left joystick = drive directions;
+> - right joystick = turning;
+> - max speed is limited in the code.
-
-
-First, **Operator B** needs to perform the following steps:
-
-1. Modify the `img_config` image client configuration under the `if __name__ == '__main__':` section in `~/avp_teleoperate/teleop/teleop_hand_and_arm.py`. It should match the image server parameters you configured on PC2 in Section 3.1.
-
-2. Choose different launch parameters based on your robot configuration. Here are some example commands:
-
- ```bash
- # 1. G1 (29DoF) Robot + Dex3-1 Dexterous Hand (Note: G1_29 is the default value for --arm, so it can be omitted)
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=G1_29 --hand=dex3
-
- # 2. G1 (29DoF) Robot only
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py
-
- # 3. G1 (23DoF) Robot
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=G1_23
-
- # 4. H1_2 Robot + Inspire hand
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=H1_2 --hand=inspire1
-
- # 5. H1 Robot
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=H1
-
- # 6. If you want to enable data visualization + recording, you can add the --record option
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=G1_23 --record
- ```
-
-3. If the program starts successfully, the terminal will pause at the final line displaying the message: "Please enter the start signal (enter 'r' to start the subsequent program):"
-
-
-
-And then, **Operator A** needs to perform the following steps:
-
-1. Wear your Apple Vision Pro device.
-
-2. Open Safari on Apple Vision Pro and visit : https://192.168.123.2:8012?ws=wss://192.168.123.2:8012
-
- > p.s. This IP address should match the IP address of your **Host machine**.
-
-3. Click `Enter VR` and `Allow` to start the VR session.
-
-4. You will see the robot's first-person perspective in the Apple Vision Pro.
-
-
-
-Next, **Operator B** can start teleoperation program by pressing the **r** key in the terminal.
-
-At this time, **Operator A** can remotely control the robot's arms (and dexterous hands).
-
-If the `--record` parameter is used, **Operator B** can press **s** key in the opened "record image" window to start recording data, and press **s** again to stop. This operation can be repeated as needed for multiple recordings.
-
-> p.s.1 Recorded data is stored in `avp_teleoperate/teleop/utils/data` by default, with usage instructions at this repo: [unitree_IL_lerobot](https://github.com/unitreerobotics/unitree_IL_lerobot/tree/main?tab=readme-ov-file#data-collection-and-conversion).
->
-> p.s.2 Please pay attention to your disk space size during data recording.
+Same as simulation but follow the safety warnings above.
## 3.4 🔚 Exit
-To exit the program, **Operator B** can press the **q** key in the 'record image' window.
-
-> 
+> 
+> To avoid damaging the robot, it is recommended to position the robot's arms close to the initial pose before pressing **q** to exit.
>
-> To avoid damaging the robot, it's best to ensure that **Operator A** positions the robot's arms in a naturally lowered or appropriate position before **Operator B** presses **q** to exit.
+> - In **Debug Mode**: After pressing the exit key, both arms will return to the robot's **initial pose** within 5 seconds, and then the control will end.
+>
+> - In **Motion Mode**: After pressing the exit key, both arms will return to the robot's **motion control pose** within 5 seconds, and then the control will end.
+
+Same as simulation but follow the safety warnings above.
-# 4. 🗺️ Codebase Tutorial
+# 4. 🗺️ Codebase Overview
```
-avp_teleoperate/
+xr_teleoperate/
│
├── assets [Storage of robot URDF-related files]
│
+├── hardware [3D‑printed hardware modules]
+│
├── teleop
│ ├── image_server
-│ │ ├── image_client.py [Used to receive image data from the robot image server]
-│ │ ├── image_server.py [Capture images from cameras and send via network (Running on robot's on-board computer)]
+│ │ ├── image_client.py [Used to receive image data from the robot image server]
+│ │ ├── image_server.py [Capture images from cameras and send via network (Running on robot's Development Computing Unit PC2)]
│ │
-│ ├── open_television
-│ │ ├── television.py [Using Vuer to capture wrist and hand data from apple vision pro]
-│ │ ├── tv_wrapper.py [Post-processing of captured data]
+│ ├── televuer
+│ │ ├── src/televuer
+│ │ ├── television.py [captures XR devices's head, wrist, hand/controller data]
+│ │ ├── tv_wrapper.py [Post-processing of captured data]
+│ │ ├── test
+│ │ ├── _test_television.py [test for television.py]
+│ │ ├── _test_tv_wrapper.py [test for tv_wrapper.py]
│ │
│ ├── robot_control
-│ │ ├── robot_arm_ik.py [Inverse kinematics of the arm]
-│ │ ├── robot_arm.py [Control dual arm joints and lock the others]
+│ │ ├── src/dex-retargeting [Dexterous hand retargeting algorithm library]
+│ │ ├── robot_arm_ik.py [Inverse kinematics of the arm]
+│ │ ├── robot_arm.py [Control dual arm joints and lock the others]
+│ │ ├── hand_retargeting.py [Dexterous hand retargeting algorithm library Wrapper]
│ │ ├── robot_hand_inspire.py [Control inspire hand joints]
│ │ ├── robot_hand_unitree.py [Control unitree hand joints]
│ │
│ ├── utils
-│ │ ├── episode_writer.py [Used to record data for imitation learning]
-│ │ ├── mat_tool.py [Some small math tools]
+│ │ ├── episode_writer.py [Used to record data for imitation learning]
│ │ ├── weighted_moving_filter.py [For filtering joint data]
│ │ ├── rerun_visualizer.py [For visualizing data during recording]
│ │
-│ │──teleop_hand_and_arm.py [Startup execution code for teleoperation]
+│ └── teleop_hand_and_arm.py [Startup execution code for teleoperation]
```
-
-
# 5. 🛠️ Hardware
-## 5.1 📋 List
-
-| Item | Quantity | Link | Remarks |
-| :--------------------------: | :------: | :----------------------------------------------------------: | :---------------------------------------------------------: |
-| **Unitree Robot G1** | 1 | https://www.unitree.com/g1 | With development computing unit |
-| **Apple Vision Pro** | 1 | https://www.apple.com/apple-vision-pro/ | |
-| **Router** | 1 | | |
-| **User PC** | 1 | | Recommended graphics card performance at RTX 4080 and above |
-| **Head Stereo Camera** | 1 | [For reference only] http://e.tb.cn/h.TaZxgkpfWkNCakg?tk=KKz03Kyu04u | For head |
-| **Head Camera Mount** | 1 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/head_stereo_camera_mount.STEP | For mounting head stereo camera, FOV 130° |
-| Intel RealSense D405 | 2 | https://www.intelrealsense.com/depth-camera-d405/ | For wrist |
-| Wrist Ring Mount | 2 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/wrist_ring_mount.STEP | Used with wrist camera mount |
-| Left Wrist Camera Mount | 1 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/left_wrist_D405_camera_mount.STEP | For mounting left wrist RealSense D405 camera |
-| Right Wrist Camera Mount | 1 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/right_wrist_D405_camera_mount.STEP | For mounting right wrist RealSense D405 camera |
-| M3 hex nuts | 4 | [For reference only] https://a.co/d/1opqtOr | For Wrist fastener |
-| M3x12 screws | 4 | [For reference only] https://amzn.asia/d/aU9NHSf | For wrist fastener |
-| M3x6 screws | 4 | [For reference only] https://amzn.asia/d/0nEz5dJ | For wrist fastener |
-| **M4x14 screws** | 2 | [For reference only] https://amzn.asia/d/cfta55x | For head fastener |
-| **M2x4 self-tapping screws** | 4 | [For reference only] https://amzn.asia/d/1msRa5B | For head fastener |
+## 5.1 📋 Parts List
+
+| Item | Quantity | Link(s) | Remarks |
+| :--------------------------: | :------: | :----------------------------------------------------------: | :----------------------------------------------------------: |
+| **humanoid robot** | 1 | https://www.unitree.com | With development computing unit |
+| **XR device** | 1 | https://www.apple.com/apple-vision-pro/ https://www.meta.com/quest/quest-3 https://www.picoxr.com/products/pico4-ultra-enterprise | |
+| **Router** | 1 | | Required for **default mode**; **wireless mode** does not need one. |
+| **User PC** | 1 | | In **simulation mode**, please use the [officially recommended](https://docs.isaacsim.omniverse.nvidia.com/4.5.0/installation/requirements.html) hardware resources for deployment. |
+| **Head stereo camera** | 1 | [For reference only] http://e.tb.cn/h.TaZxgkpfWkNCakg?tk=KKz03Kyu04u | For robot head view |
+| **Head camera mount** | 1 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/head_stereo_camera_mount.STEP | For mounting head stereo camera |
+| Intel RealSense D405 camera | 2 | https://www.intelrealsense.com/depth-camera-d405/ | For wrist |
+| Wrist ring mount | 2 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/wrist_ring_mount.STEP | Used with wrist camera mount |
+| Left wrist D405 mount | 1 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/left_wrist_D405_camera_mount.STEP | For mounting left wrist RealSense D405 camera |
+| Right wrist D405 mount | 1 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/right_wrist_D405_camera_mount.STEP | For mounting right wrist RealSense D405 camera |
+| M3-1 hex nuts | 4 | [For reference only] https://a.co/d/1opqtOr | For Wrist fastener |
+| M3x12 screws | 4 | [For reference only] https://amzn.asia/d/aU9NHSf | For Wrist fastener |
+| M3x6 screws | 4 | [For reference only] https://amzn.asia/d/0nEz5dJ | For Wrist fastener |
+| **M4x14 screws** | 2 | [For reference only] https://amzn.asia/d/cfta55x | For head fastener |
+| **M2x4 self‑tapping screws** | 4 | [For reference only] https://amzn.asia/d/1msRa5B | For head fastener |
> Note: The bolded items are essential equipment for teleoperation tasks, while the other items are optional equipment for recording [datasets](https://huggingface.co/unitreerobotics).
-## 5.2 🔨 Installation diagram
+## 5.2 🔨 Mounting Diagrams
@@ -471,15 +467,12 @@ avp_teleoperate/
This code builds upon following open-source code-bases. Please visit the URLs to see the respective LICENSES:
-1) https://github.com/OpenTeleVision/TeleVision
-2) https://github.com/dexsuite/dex-retargeting
-3) https://github.com/vuer-ai/vuer
-4) https://github.com/stack-of-tasks/pinocchio
-5) https://github.com/casadi/casadi
-6) https://github.com/meshcat-dev/meshcat-python
-7) https://github.com/zeromq/pyzmq
-8) https://github.com/unitreerobotics/unitree_dds_wrapper
-9) https://github.com/tonyzhaozh/act
-10) https://github.com/facebookresearch/detr
-11) https://github.com/Dingry/BunnyVisionPro
-12) https://github.com/unitreerobotics/unitree_sdk2_python
+1. https://github.com/OpenTeleVision/TeleVision
+2. https://github.com/dexsuite/dex-retargeting
+3. https://github.com/vuer-ai/vuer
+4. https://github.com/stack-of-tasks/pinocchio
+5. https://github.com/casadi/casadi
+6. https://github.com/meshcat-dev/meshcat-python
+7. https://github.com/zeromq/pyzmq
+8. https://github.com/Dingry/BunnyVisionPro
+9. https://github.com/unitreerobotics/unitree_sdk2_python
diff --git a/README_ja-JP.md b/README_ja-JP.md
index 2edb0ed..d9bda66 100644
--- a/README_ja-JP.md
+++ b/README_ja-JP.md
@@ -1,12 +1,14 @@
-
avp_teleoperate
-
Unitree Robotics
+
xr_teleoperate
+
+
+
English | 中文 | 日本語
-# 📺 ビデオデモ
+# 📺 デモ動画
@@ -15,465 +17,395 @@
- G1 (29自由度) + Dex3-1
+ G1 (29DoF) + Dex3-1
- H1_2(腕 7自由度)
+ H1_2 (Arm 7DoF)
+# 🔖 更新内容
+
+1. **Vuerライブラリをアップグレード**し、より多くのXRデバイスモードに対応しました。これに伴い、プロジェクト名を **`avp_teleoperate`** から **`xr_teleoperate`** に変更しました。従来の Apple Vision Pro に加え、**Meta Quest 3(コントローラー対応)** や **PICO 4 Ultra Enterprise(コントローラー対応)** にも対応しています。
+2. 一部の機能を**モジュール化**し、Gitサブモジュール(`git submodule`)を用いて管理・読み込みを行うことで、コード構造の明確化と保守性を向上させました。
+3. **ヘッドレスモード**、**運用モード**、**シミュレーションモード**を新たに追加し、起動パラメータの設定を最適化しました(第2.2節参照)。**シミュレーションモード**により、環境構成の検証やハードウェア故障の切り分けが容易になります。
+4. デフォルトの手指マッピングアルゴリズムを Vector から **DexPilot** に変更し、指先のつまみ動作の精度と操作性を向上させました。
+5. その他、さまざまな最適化を実施しました。
# 0. 📖 イントロダクション
-このリポジトリは、**Apple Vision Pro** を使用して **Unitree ヒューマノイドロボット** を遠隔操作するためのものです。
-以下は本リポジトリで現在サポートされているロボットの種類です,
+このリポジトリでは、**XR(拡張現実)デバイス**(Apple Vision Pro、PICO 4 Ultra Enterprise、Meta Quest 3など)を使用して**Unitreeヒューマノイドロボット**の**遠隔操作**を実装しています。
+
+必要なデバイスと配線図は以下の通りです。
+
+
+
+
+
+
+
+
+このリポジトリで現在サポートされているデバイス:
-以下は、必要なデバイスと配線図です:
-
-
-
-
-
-
-
-これはネットワークトポロジー図で、G1ロボットを例にしています:
-
-
-
-
-
-
-
+# 1. 📦 インストール
-# 1. 📦 前提条件
+Ubuntu 20.04と22.04でテスト済みです。他のOSでは設定が異なる場合があります。本ドキュメントでは、主に通常モードについて説明します。
-私たちは Ubuntu 20.04 と Ubuntu 22.04 でコードをテストしました。他のオペレーティングシステムでは異なる設定が必要かもしれません。
+詳細は[公式ドキュメント](https://support.unitree.com/home/zh/Teleoperation)と[OpenTeleVision](https://github.com/OpenTeleVision/TeleVision)を参照してください。
-詳細については、[公式ドキュメント](https://support.unitree.com/home/zh/Teleoperation) および [OpenTeleVision](https://github.com/OpenTeleVision/TeleVision) を参照してください。
-
-## 1.1 🦾 逆運動学
+## 1.1 📥 基本設定
```bash
-unitree@Host:~$ conda create -n tv python=3.8
-unitree@Host:~$ conda activate tv
-# `pip install` を使用する場合、pinocchio のバージョンが 3.1.0 であることを確認してください
-(tv) unitree@Host:~$ conda install pinocchio -c conda-forge
-(tv) unitree@Host:~$ pip install meshcat
-(tv) unitree@Host:~$ pip install casadi
+# Create a conda environment
+(base) unitree@Host:~$ conda create -n tv python=3.10 pinocchio=3.1.0 numpy=1.26.4 -c conda-forge
+(base) unitree@Host:~$ conda activate tv
+# Clone this repo
+(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/xr_teleoperate.git
+(tv) unitree@Host:~$ cd xr_teleoperate
+# Shallow clone submodule
+(tv) unitree@Host:~/xr_teleoperate$ git submodule update --init --depth 1
+# Install televuer submodule
+(tv) unitree@Host:~/xr_teleoperate$ cd teleop/televuer
+(tv) unitree@Host:~/xr_teleoperate/teleop/televuer$ pip install -e .
+# Generate the certificate files required for televuer submodule
+(tv) unitree@Host:~/xr_teleoperate/teleop/televuer$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout key.pem -out cert.pem
+# Install dex-retargeting submodule
+(tv) unitree@Host:~/xr_teleoperate/teleop/televuer$ cd ../robot_control/dex-retargeting/
+(tv) unitree@Host:~/xr_teleoperate/teleop/robot_control/dex-retargeting$ pip install -e .
+# Install additional dependencies required by this repo
+(tv) unitree@Host:~/xr_teleoperate/teleop/robot_control/dex-retargeting$ cd ../../../
+(tv) unitree@Host:~/xr_teleoperate$ pip install -r requirements.txt
```
-> 注意:コマンドの前にあるすべての識別子は、**どのデバイスとディレクトリでコマンドを実行するべきか**を示すためのものです。
->
-Ubuntu システムの `~/.bashrc` ファイルでは、デフォルトの設定は次の通りです: `PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '`
->
-> 例として、`(tv) unitree@Host:~$ pip install meshcat` コマンドを取り上げます。
->
-> - `(tv)` はシェルが conda 環境 `tv` にあることを示します。
->- `unitree@Host:~` はユーザー `\u` `unitree` がデバイス `\h` `Host` にログインしており、現在の作業ディレクトリ `\w` が `$HOME` であることを示します。
-> - `$` は現在のシェルが Bash であることを示します(非ルートユーザーの場合)。
-> - `pip install meshcat` は `unitree` が `Host` で実行したいコマンドです。
->
-> 詳細については、[Harley Hahn's Guide to Unix and Linux](https://www.harley.com/unix-book/book/chapters/04.html#H) および [Conda User Guide](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) を参照してください。
-
## 1.2 🕹️ unitree_sdk2_python
```bash
-# unitree_sdk2_python をインストールします。
+# ロボット通信用ライブラリインストール
(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/unitree_sdk2_python.git
(tv) unitree@Host:~$ cd unitree_sdk2_python
(tv) unitree@Host:~$ pip install -e .
```
-> 注意:元の h1_2 ブランチの [unitree_dds_wrapper](https://github.com/unitreerobotics/unitree_dds_wrapper) は一時的なバージョンであり、現在は公式の Python 版制御通信ライブラリ [unitree_sdk2_python](https://github.com/unitreerobotics/unitree_sdk2_python) に完全に移行されています。
+> **注1**: 元のh1_2ブランチのunitree_dds_wrapperは暫定版でした。現在は公式Python制御ライブラリunitree_sdk2_pythonに移行済みです。
+>
+> **注2**: コマンド前の識別子は「どのデバイスでどのディレクトリで実行するか」を示しています。
+>
+> Ubuntuの`~/.bashrc`デフォルト設定: `PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '`
+>
+> 例: `(tv) unitree@Host:~$ pip install meshcat`
+>
+> - `(tv)` conda環境`tv`を表示
+> - `unitree@Host:~` ユーザー`unitree`が`Host`デバイスにログイン、カレントディレクトリは`$HOME`
+> - `$` Bashシェル(非rootユーザー)
+> - `pip install meshcat` は`Host`で実行するコマンド
+# 2. 💻 シミュレーション環境
+## 2.1 📥 環境設定
-# 2. ⚙️ 設定
+まずunitree_sim_isaaclabをインストールし、READMEに従って設定します。
-## 2.1 📥 基本
+G1(29 DoF)とDex3ハンド構成でシミュレーションを起動:
```bash
-(tv) unitree@Host:~$ cd ~
-(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/avp_teleoperate.git
-(tv) unitree@Host:~$ cd ~/avp_teleoperate
-(tv) unitree@Host:~$ pip install -r requirements.txt
+(base) unitree@Host:~$ conda activate unitree_sim_env
+(unitree_sim_env) unitree@Host:~$ cd ~/unitree_sim_isaaclab
+(unitree_sim_env) unitree@Host:~/unitree_sim_isaaclab$ python sim_main.py --device cpu --enable_cameras --task Isaac-PickPlace-Cylinder-G129-Dex3-Joint --enable_dex3_dds --robot_type g129
```
-## 2.2 🔌 ローカルストリーミング
+シミュレーション起動後、ウィンドウをクリックして有効化。ターミナルに`controller started, start main loop...`と表示されます。
-**Apple Vision Pro**
+シミュレーションGUI:
-Apple は非 HTTPS 接続での WebXR を許可していません。アプリケーションをローカルでテストするには、自己署名証明書を作成し、クライアントにインストールする必要があります。Ubuntu マシンとルーターが必要です。Apple Vision Pro と Ubuntu **ホストマシン** を同じルーターに接続します。
+
-1. mkcert をインストールします: https://github.com/FiloSottile/mkcert
-2. **ホストマシン** のローカル IP アドレスを確認します:
+## 2.2 🚀 起動
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ ifconfig | grep inet
-```
+物理ロボットとシミュレーションの両方でXR制御をサポート。コマンドライン引数でモード選択:
-**ホストマシン** のローカル IP アドレスが `192.168.123.2` であると仮定します。
+- **基本制御パラメータ**
-> p.s. `ifconfig` コマンドを使用して **ホストマシン** の IP アドレスを確認できます。
+| ⚙️ パラメータ | 📜 説明 | 🔘 オプション | 📌 デフォルト |
+| :----------: | :----------------------------------: | :----------------------------------------------------------: | :----------: |
+| `--xr-mode` | XR入力モード選択 | `hand` (**ハンドトラッキング**) `controller` (**コントローラートラッキング**) | `hand` |
+| `--arm` | ロボットアームタイプ選択 (0. 📖 参照) | `G1_29` `G1_23` `H1_2` `H1` | `G1_29` |
+| `--ee` | エンドエフェクタ選択 (0. 📖 参照) | `dex1` `dex3` `inspire1` | none |
-3. 証明書を作成します:
+- **モードフラグ**
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ mkcert -install && mkcert -cert-file cert.pem -key-file key.pem 192.168.123.2 localhost 127.0.0.1
-```
+| ⚙️ フラグ | 📜 説明 |
+| :----------: | :----------------------------------------------------------: |
+| `--record` | **データ記録有効化**: **r**押下で開始後、**s**でエピソード記録開始/停止。繰り返し可能 |
+| `--motion` | **モーション制御有効化**: 遠隔操作中に独立したロボット制御を許可。ハンドモードではR3リモコンで歩行、コントローラーモードではジョイスティックで歩行 |
+| `--headless` | GUIなしで実行(ヘッドレスPC2展開用) |
+| `--sim` | **シミュレーションモード**有効化 |
-生成された `cert.pem` と `key.pem` ファイルを `teleop` に配置します。
+G1(29 DoF) + Dex3でハンドトラッキング、シミュレーション、記録モードで起動:
```bash
-(tv) unitree@Host:~/avp_teleoperate$ cp cert.pem key.pem ~/avp_teleoperate/teleop/
+(tv) unitree@Host:~$ cd ~/xr_teleoperate/teleop/
+(tv) unitree@Host:~/xr_teleoperate/teleop/$ python teleop_hand_and_arm.py --xr-mode=hand --arm=G1_29 --ee=dex3 --sim --record
+# 簡略化(デフォルト適用):
+(tv) unitree@Host:~/xr_teleoperate/teleop/$ python teleop_hand_and_arm.py --ee=dex3 --sim --record
```
-4. サーバーでファイアウォールを開きます:
+プログラム起動後、ターミナル表示:
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ sudo ufw allow 8012
-```
+
-5. Apple Vision Pro に ca-certificates をインストールします:
+次の手順:
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ mkcert -CAROOT
-```
-
-`rootCA.pem` を AirDrop 経由で Apple Vision Pro にコピーし、インストールします。
-
-設定 > 一般 > 情報 > 証明書信頼設定。「ルート証明書の完全な信頼を有効にする」の下で、証明書の信頼をオンにします。
-
-設定 > アプリ > Safari > 高度な設定 > 機能フラグ > WebXR 関連機能を有効にします。
-
-> 注意:新しい Vision OS 2 システムでは、この手順が異なります。証明書を AirDrop 経由で Apple Vision Pro デバイスにコピーした後、設定アプリの左上のアカウント欄の下に証明書関連情報欄が表示されます。それをクリックして、証明書の信頼を有効にします。
+1. XRヘッドセット(Apple Vision Pro、PICO4など)を装着
-------
+2. 対応するWi-Fiに接続
-**2.2.2 PICO 4 Ultra Enterprise or Meta Quest 3**
+3. ブラウザ(SafariやPICO Browserなど)で以下にアクセス: `https://192.168.123.2:8012?ws=wss://192.168.123.2:8012`
-PICO 4 Ultra Enterprise と Meta-Quest 3 において、ハンドトラッキングを用いたテレオペレーションを試みました。
+ > **注1**: このIPは**Host**のIPと一致させる必要あり(`ifconfig`で確認)。
+ > **注2**: 警告ページが表示される場合があります。[詳細設定]→[IPにアクセス(安全ではない)]を選択。
-PICO 4 Ultra Enterprise のシステム仕様:
+
-> システムバージョン:5.12.6.U;Android バージョン:14;ソフトウェアバージョン:c000_cf01_bv1.0.1_sv5.12.6_202412121344_sparrow_b4244_user; ブラウザーバージョン: [4.0.28 beta version](https://github.com/vuer-ai/vuer/issues/45#issuecomment-2674918619)
+4. Vuerウェブで[Virtual Reality]をクリック。すべてのプロンプトを許可してVRセッションを開始。
-Meta-Quest 3 のシステム仕様:
+
-> システムバージョン:49829370066100510;バージョン:62.0.0.273.343.570372087;ランタイムバージョン:62.0.0.269.341.570372063;OS バージョン:SQ3A.220605.009.A1
+5. ヘッドセットにロボットの一人称視点が表示されます。ターミナルに接続情報が表示:
- さらなる設定手順については、その [issue](https://github.com/unitreerobotics/avp_teleoperate/issues/32) をご覧ください。
+```bash
+websocket is connected. id:dbb8537d-a58c-4c57-b49d-cbb91bd25b90
+default socket worker is up, adding clientEvents
+Uplink task running. id:dbb8537d-a58c-4c57-b49d-cbb91bd25b90
+```
-## 2.3 🔎 ユニットテスト
+6. 急な動きを防ぐため、ロボットの**初期姿勢**に腕を合わせる:
-このステップは、環境が正しくインストールされているかを確認するためのものです。
+
-近日公開。
+7. ターミナルで**r**を押して遠隔操作を開始。ロボットアームと多指ハンドを制御できます。
+8. 遠隔操作中、**s**で記録開始、再度**s**で停止・保存。繰り返し可能。
+
+> **注1**: 記録データはデフォルトで`xr_teleoperate/teleop/utils/data`に保存。unitree_IL_lerobotで使用方法を確認。
+> **注2**: データ記録時はディスク容量に注意してください。
+## 2.3 🔚 終了
-# 3. 🚀 使用方法
+ターミナル(または「record image」ウィンドウ)で**q**を押して終了。
-このプログラムを開始する前に、[公式ドキュメント](https://support.unitree.com/home/zh/Teleoperation) を少なくとも一度は読んでください。
+# 3. 🤖 物理環境展開
+物理環境展開の手順はシミュレーションと似ていますが、以下の点が異なります:
-## 3.1 🖼️ 画像サーバー
+## 3.1 🖼️ 画像サービス
-`avp_teleoperate/teleop/image_server` ディレクトリにある `image_server.py` を Unitree ロボット (G1/H1/H1_2 など) の **開発用コンピューティングユニット PC2** にコピーし、**PC2** で次のコマンドを実行します:
+`xr_teleoperate/teleop/image_server`ディレクトリの`image_server.py`をUnitreeロボット(G1/H1/H1_2など)の**開発用計算ユニットPC2**にコピーします。
```bash
-# 注意1:scp コマンドを使用して image_server.py を PC2 に転送し、ssh を使用して PC2 にリモートログインして実行できます。
-# 開発用コンピューティングユニット PC2 の IP アドレスが 192.168.123.164 であると仮定すると、転送プロセスは以下のようになります:
-# まず ssh で PC2 にログインし、画像サーバーのフォルダを作成します
+# 補足: scpコマンドでimage_server.pyをPC2に転送後、sshでPC2にリモートログインして実行可能
+# 開発用計算ユニットPC2のIPが192.168.123.164の場合の転送手順:
+# SSHでPC2にログインし、画像サーバー用フォルダ作成
(tv) unitree@Host:~$ ssh unitree@192.168.123.164 "mkdir -p ~/image_server"
-# ローカルの image_server.py を PC2 の ~/image_server ディレクトリにコピーします
-(tv) unitree@Host:~$ scp ~/avp_teleoperate/teleop/image_server/image_server.py unitree@192.168.123.164:~/image_server/
+# ローカルのimage_server.pyをPC2の~/image_serverディレクトリにコピー
+(tv) unitree@Host:~$ scp ~/xr_teleoperate/teleop/image_server/image_server.py unitree@192.168.123.164:~/image_server/
+```
-# 注意2:現在、この画像転送プログラムは OpenCV と Realsense SDK の 2 つの画像読み取り方法をサポートしています。image_server.py の ImageServer クラスのコメントを読んで、カメラハードウェアに応じて画像転送サービスを設定してください。
-# 現在、Unitree ロボット PC2 端末にいます
+**PC2**で以下を実行:
+
+```bash
+# 補足: 現在この画像転送プログラムは、OpenCVとRealsense SDKの2つの画像読み取り方法をサポート。`image_server.py`内の`ImageServer`クラスのコメントを参照し、カメラハードウェアに合わせて画像転送サービスを設定。
+# UnitreeロボットPC2のターミナルで実行
unitree@PC2:~/image_server$ python image_server.py
-# 端末に次のように出力されます:
+# ターミナルに以下の出力が表示:
# {'fps': 30, 'head_camera_type': 'opencv', 'head_camera_image_shape': [480, 1280], 'head_camera_id_numbers': [0]}
# [Image Server] Head camera 0 resolution: 480.0 x 1280.0
# [Image Server] Image server has started, waiting for client connections...
```
-画像サービスが開始された後、**ホスト** 端末で `image_client.py` を使用して通信が成功したかどうかをテストできます:
+画像サービス起動後、**Host**ターミナルで`image_client.py`を使用して通信テスト可能:
```bash
-(tv) unitree@Host:~/avp_teleoperate/teleop/image_server$ python image_client.py
+(tv) unitree@Host:~/xr_teleoperate/teleop/image_server$ python image_client.py
```
-## 3.2 ✋ Inspire ハンドサーバー(オプション)
+## 3.2 ✋ Inspireハンドサービス(オプション)
-> 注意: 選択したロボット構成に Inspire デクスタラスハンドが使用されていない場合、このセクションは無視してください。
+> **注1**: 選択したロボット構成にInspireハンドを使用していない場合、このセクションは無視してください。
+> **注2**: [Inspire DFXハンド](https://support.unitree.com/home/zh/G1_developer/inspire_dfx_dexterous_hand)を搭載したG1ロボットについては、[issue #46](https://github.com/unitreerobotics/xr_teleoperate/issues/46)を参照してください。
+> **注3**: [Inspire FTPハンド](https://support.unitree.com/home/zh/G1_developer/inspire_ftp_dexterity_hand)については、[issue #48](https://github.com/unitreerobotics/xr_teleoperate/issues/48)を参照してください。
-関連する環境を設定し、制御プログラムをコンパイルするには、[デクスタラスハンド開発](https://support.unitree.com/home/zh/H1_developer/Dexterous_hand) を参照できます。まず、[このリンク](https://oss-global-cdn.unitree.com/static/0a8335f7498548d28412c31ea047d4be.zip) を使用してデクスタラスハンド制御インターフェースプログラムをダウンロードし、Unitree ロボットの **PC2** にコピーします。
+多指ハンド開発を参照して関連環境を設定し、制御プログラムをコンパイル。このURLから多指ハンド制御インターフェースプログラムをダウンロードし、Unitreeロボットの**PC2**にコピー。
-Unitree ロボットの **PC2** で次のコマンドを実行します:
+Unitreeロボットの**PC2**で以下を実行:
```bash
unitree@PC2:~$ sudo apt install libboost-all-dev libspdlog-dev
-# プロジェクトをビルドします
+# プロジェクトビルド
unitree@PC2:~$ cd h1_inspire_service & mkdir build & cd build
unitree@PC2:~/h1_inspire_service/build$ cmake .. -DCMAKE_BUILD_TYPE=Release
unitree@PC2:~/h1_inspire_service/build$ make
-# ターミナル 1. h1 inspire ハンドサービスを実行します
+# ターミナル1. h1 inspireハンドサービス実行
unitree@PC2:~/h1_inspire_service/build$ sudo ./inspire_hand -s /dev/ttyUSB0
-# ターミナル 2. サンプルを実行します
+# ターミナル2. サンプル実行
unitree@PC2:~/h1_inspire_service/build$ ./h1_hand_example
```
-2 つの手が連続して開閉する場合、成功を示します。成功したら、ターミナル 2 で `./h1_hand_example` プログラムを閉じます。
+両手が連続的に開閉すれば成功。成功後、ターミナル2の`./h1_hand_example`プログラムを終了。
-## 3.3 🚀 スタート
+## 3.3 🚀 起動
-> 
+> 
>
-> 1. すべての人は、潜在的な危険を防ぐためにロボットから安全な距離を保つ必要があります!
->
-> 2. このプログラムを実行する前に、[公式ドキュメント](https://support.unitree.com/home/zh/Teleoperation) を少なくとも一度は読んでください。
->
-> 3. 常にロボットが [デバッグモード (L2+R2)](https://support.unitree.com/home/zh/H1_developer/Remote_control) に入っていることを確認し、モーションコントロールプログラムを停止して、潜在的なコマンドの競合問題を回避します。
->
-
-このプログラムを実行するには、**オペレーター A** と **オペレーター B** と呼ばれる 2 人のオペレーターがいるのが最適です。
-
-
-
-まず、**オペレーター B** は次の手順を実行する必要があります:
-
-1. `~/avp_teleoperate/teleop/teleop_hand_and_arm.py` の `if __name__ == '__main__':` コードの下にある `img_config` 画像クライアント設定を変更します。これは、3.1 節で PC2 に設定した画像サーバーパラメータと同じである必要があります。
-2. ロボット構成に応じて異なる起動パラメータを選択します。 以下は、いくつかのコマンド例です:
-
-```bash
-# 1. G1(29自由度)ロボット + Dex3-1 多指ハンド(※ G1_29 は --arm のデフォルト値なので省略可能)
-(tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=G1_29 --hand=dex3
-
-# 2. G1(29自由度)ロボットのみ
-(tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py
-
-# 3. G1(23自由度)ロボット
-(tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=G1_23
-
-# 4. H1_2 ロボット + Inspire Hand
-(tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=H1_2 --hand=inspire1
-
-# 5. H1 ロボット
-(tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=H1
-
-# 6. データの可視化および記録を有効にしたい場合は、--record オプションを追加してください
-(tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=G1_23 --record
-```
-
-3. プログラムが正常に起動すると、端末の最後の行に "Please enter the start signal (enter 'r' to start the subsequent program):" というメッセージが表示されます。
-
-
-
-次に、**オペレーター A** は次の手順を実行します:
-
-1. Apple Vision Pro デバイスを装着します。
-
-2. Apple Vision Pro で Safari を開き、次の URL にアクセスします: https://192.168.123.2:8012?ws=wss://192.168.123.2:8012
-
- > p.s. この IP アドレスは **ホストマシン** の IP アドレスと一致する必要があります。
-
-3. `Enter VR` をクリックし、`Allow` を選択して VR セッションを開始します。
-
-4. Apple Vision Pro でロボットの一人称視点が表示されます。
-
-
-
-次に、**オペレーター B** は端末で **r** キーを押して遠隔操作プログラムを開始します。
-
-この時点で、**オペレーター A** はロボットのアーム(およびデクスタラスハンド)を遠隔操作できます。
-
-`--record` パラメータを使用した場合、**オペレーター B** は開いている "record image" ウィンドウで **s** キーを押してデータの記録を開始し、再度 **s** キーを押して停止できます。必要に応じてこれを繰り返すことができます。
-
-> 注意1:記録されたデータはデフォルトで `avp_teleoperate/teleop/utils/data` に保存されます。使用方法については、このリポジトリを参照してください: [unitree_IL_lerobot](https://github.com/unitreerobotics/unitree_IL_lerobot/tree/main?tab=readme-ov-file#data-collection-and-conversion)。
->
-> 注意2:データ記録時にはディスク容量に注意してください。
+> 1. すべての人はロボットから安全な距離を保ち、潜在的な危険を防止してください!
+> 2. このプログラムを実行する前に、少なくとも一度は公式ドキュメントをお読みください。
+> 3. `--motion`なしの場合、ロボットがデバッグモード(L2+R2)に入り、モーション制御プログラムが停止していることを確認してください。これにより潜在的なコマンド競合問題を回避できます。
+> 4. モーションモード(`--motion`あり)を使用する場合、ロボットが制御モード(R3リモコン経由)にあることを確認。
+> 5. モーションモード時:
+> - 右コントローラー**A** = 遠隔操作終了
+> - 両ジョイスティック押下 = ソフト非常停止(ダンピングモードに切替)
+> - 左ジョイスティック = 移動方向;
+> - 右ジョイスティック = 旋回;
+> - 最大速度はコード内で制限。
+
+シミュレーションと同じですが、上記の安全警告に従ってください。
## 3.4 🔚 終了
-プログラムを終了するには、**オペレーター B** は 'record image' ウィンドウで **q** キーを押すことができます。
-
-> 
+> 
>
-> ロボットを損傷しないようにするために、**オペレーター A** がロボットのアームを自然に下げた位置または適切な位置に配置した後、**オペレーター B** が **q** を押して終了するのが最適です。
-
+> ロボット損傷を防ぐため、終了前にロボットの腕を初期姿勢に近づけることを推奨。
+>
+> - **デバッグモード**: 終了キー押下後、両腕は5秒以内にロボットの**初期姿勢**に戻り、制御終了。
+> - **モーションモード**: 終了キー押下後、両腕は5秒以内にロボットの**モーション制御姿勢**に戻り、制御終了。
+シミュレーションと同じですが、上記の安全警告に従ってください。
-# 4. 🗺️ コードベースチュートリアル
+# 4. 🗺️ コード構成
```
-avp_teleoperate/
+xr_teleoperate/
+│
+├── assets [ロボットURDF関連ファイル格納]
│
-├── assets [ロボット URDF 関連ファイルの保存]
+├── hardware [3Dプリントハードウェアモジュール]
│
├── teleop
│ ├── image_server
-│ │ ├── image_client.py [ロボット画像サーバーから画像データを受信するために使用]
-│ │ ├── image_server.py [カメラから画像をキャプチャし、ネットワーク経由で送信(ロボットのオンボードコンピュータで実行)]
+│ │ ├── image_client.py [ロボット画像サーバーから画像データを受信]
+│ │ ├── image_server.py [カメラから画像をキャプチャしネットワーク送信(ロボットの開発用計算ユニットPC2で実行)]
│ │
-│ ├── open_television
-│ │ ├── television.py [Apple Vision Pro から Vuer を使用して手首と手のデータをキャプチャ]
-│ │ ├── tv_wrapper.py [キャプチャされたデータの後処理]
+│ ├── televuer
+│ │ ├── src/televuer
+│ │ ├── television.py [XRデバイスの頭部、手首、手・コントローラーのデータを取得]
+│ │ ├── tv_wrapper.py [取得データの後処理]
+│ │ ├── test
+│ │ ├── _test_television.py [television.pyのテスト]
+│ │ ├── _test_tv_wrapper.py [tv_wrapper.pyのテスト]
│ │
│ ├── robot_control
-│ │ ├── robot_arm_ik.py [アームの逆運動学]
-│ │ ├── robot_arm.py [デュアルアームジョイントを制御し、他の部分をロック]
-│ │ ├── robot_hand_inspire.py [Inspire ハンドジョイントを制御]
-│ │ ├── robot_hand_unitree.py [Unitree ハンドジョイントを制御]
+│ │ ├── src/dex-retargeting [多指ハンドリターゲティングアルゴリズムライブラリ]
+│ │ ├── robot_arm_ik.py [アームの逆運動学]
+│ │ ├── robot_arm.py [両腕関節を制御し他をロック]
+│ │ ├── hand_retargeting.py [多指ハンドリターゲティングアルゴリズムラッパー]
+│ │ ├── robot_hand_inspire.py [inspireハンド関節を制御]
+│ │ ├── robot_hand_unitree.py [unitreeハンド関節を制御]
│ │
│ ├── utils
-│ │ ├── episode_writer.py [模倣学習のデータを記録するために使用]
-│ │ ├── mat_tool.py [いくつかの小さな数学ツール]
-│ │ ├── weighted_moving_filter.py [ジョイントデータをフィルタリングするため]
-│ │ ├── rerun_visualizer.py [記録中のデータを可視化するため]
+│ │ ├── episode_writer.py [模倣学習用データ記録]
+│ │ ├── weighted_moving_filter.py [関節データのフィルタリング]
+│ │ ├── rerun_visualizer.py [記録中のデータ可視化]
│ │
-│ │──teleop_hand_and_arm.py [遠隔操作の起動実行コード]
+│ └── teleop_hand_and_arm.py [遠隔操作起動実行コード]
```
-
-
# 5. 🛠️ ハードウェア
-## 5.1 📋 リスト
-
-| アイテム | 数量 | リンク | 備考 |
-| :--------------------------: | :------: | :----------------------------------------------------------: | :---------------------------------------------------------: |
-| **Unitree ロボット G1** | 1 | https://www.unitree.com/g1 | 開発用コンピューティングユニット付き |
-| **Apple Vision Pro** | 1 | https://www.apple.com/apple-vision-pro/ | |
-| **ルーター** | 1 | | |
-| **ユーザー PC** | 1 | | 推奨グラフィックカード性能は RTX 4080 以上 |
-| **ヘッドステレオカメラ** | 1 | [参考のみ] http://e.tb.cn/h.TaZxgkpfWkNCakg?tk=KKz03Kyu04u | ヘッド用 |
-| **ヘッドカメラマウント** | 1 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/head_stereo_camera_mount.STEP | ヘッドステレオカメラの取り付け用, FOV 130° |
-| Intel RealSense D405 | 2 | https://www.intelrealsense.com/depth-camera-d405/ | リスト用 |
-| リストリングマウント | 2 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/wrist_ring_mount.STEP | リストカメラマウントと一緒に使用 |
-| 左リストカメラマウント | 1 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/left_wrist_D405_camera_mount.STEP | 左リスト RealSense D405 カメラの取り付け用 |
-| 右リストカメラマウント | 1 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/right_wrist_D405_camera_mount.STEP | 右リスト RealSense D405 カメラの取り付け用 |
-| M3 六角ナット | 4 | [参考のみ] https://a.co/d/1opqtOr | リストファスナー用 |
-| M3x12 ネジ | 4 | [参考のみ] https://amzn.asia/d/aU9NHSf | リストファスナー用 |
-| M3x6 ネジ | 4 | [参考のみ] https://amzn.asia/d/0nEz5dJ | リストファスナー用 |
-| **M4x14 ネジ** | 2 | [参考のみ] https://amzn.asia/d/cfta55x | ヘッドファスナー用 |
-| **M2x4 自タッピングネジ** | 4 | [参考のみ] https://amzn.asia/d/1msRa5B | ヘッドファスナー用 |
-
-> 注意: 太字のアイテムは遠隔操作タスクに必要な機器であり、他のアイテムは [データセット](https://huggingface.co/unitreerobotics) を記録するためのオプション機器です。
-
-## 5.2 🔨 インストール図
+## 5.1 📋 部品リスト
-
-
- アイテム
- シミュレーション
- 実物
-
-
- ヘッド
-
-
-
- ヘッドマウント
-
-
-
-
-
- 組み立ての側面図
-
-
-
-
-
- 組み立ての正面図
-
-
-
-
- リスト
-
-
-
- リストリングとカメラマウント
-
-
-
-
-
- 左手の組み立て
-
-
-
-
-
- 右手の組み立て
-
-
-
-
+| アイテム | 数量 | リンク | 備考 |
+| :------------------------: | :--: | :----------------------------------------------------------: | :----------------------------------------------------------: |
+| **ヒューマノイドロボット** | 1 | https://www.unitree.com | 開発用計算ユニット付属 |
+| **XRデバイス** | 1 | https://www.apple.com/apple-vision-pro/ https://www.meta.com/quest/quest-3 https://www.picoxr.com/products/pico4-ultra-enterprise | |
+| **ルーター** | 1 | | **デフォルトモード**で必要; **ワイヤレスモード**では不要 |
+| **ユーザーPC** | 1 | | **シミュレーションモード**では、公式推奨ハードウェアリソースを使用。 |
+| **ヘッドステレオカメラ** | 1 | [参考] http://e.tb.cn/h.TaZxgkpfWkNCakg?tk=KKz03Kyu04u | ロボット頭部視点用 |
+| **ヘッドカメラマウント** | 1 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/head_stereo_camera_mount.STEP | ヘッドステレオカメラ取り付け用 |
+| Intel RealSense D405カメラ | 2 | https://www.intelrealsense.com/depth-camera-d405/ | 手首用 |
+| 手首リングマウント | 2 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/wrist_ring_mount.STEP | 手首カメラマウントと併用 |
+| 左手首D405マウント | 1 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/left_wrist_D405_camera_mount.STEP | 左手首RealSense D405カメラ取り付け用 |
+| 右手首D405マウント | 1 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/right_wrist_D405_camera_mount.STEP | 右手首RealSense D405カメラ取り付け用 |
+| M3-1六角ナット | 4 | [参考] https://a.co/d/1opqtOr | 手首固定用 |
+| M3x12ネジ | 4 | [参考] https://amzn.asia/d/aU9NHSf | 手首固定用 |
+| M3x6ネジ | 4 | [参考] https://amzn.asia/d/0nEz5dJ | 手首固定用 |
+| **M4x14ネジ** | 2 | [参考] https://amzn.asia/d/cfta55x | 頭部固定用 |
+| **M2x4自攻ネジ** | 4 | [参考] https://amzn.asia/d/1msRa5B | 頭部固定用 |
+
+> 注: 太字のアイテムは遠隔操作タスクに必須の設備、その他はデータセット記録用のオプション設備。
-> 注意: リストリングマウントは、画像の赤い円で示されているように、ロボットのリストのシームと一致する必要があります。
+## 5.2 🔨 取り付け図
+ アイテム シミュレーション 実機 頭部 頭部マウント
取り付け側面図
取り付け正面図
手首 手首リングとカメラマウント
左手取り付け図
右手取り付け図
+> 注: 手首リングマウントは、ロボットの手首の継ぎ目に合わせて取り付け(画像の赤丸部分)。
# 6. 🙏 謝辞
-このコードは、以下のオープンソースコードベースに基づいて構築されています。各ライセンスを確認するには、以下の URL を訪れてください。
-
-1) https://github.com/OpenTeleVision/TeleVision
-2) https://github.com/dexsuite/dex-retargeting
-3) https://github.com/vuer-ai/vuer
-4) https://github.com/stack-of-tasks/pinocchio
-5) https://github.com/casadi/casadi
-6) https://github.com/meshcat-dev/meshcat-python
-7) https://github.com/zeromq/pyzmq
-8) https://github.com/unitreerobotics/unitree_dds_wrapper
-9) https://github.com/tonyzhaozh/act
-10) https://github.com/facebookresearch/detr
-11) https://github.com/Dingry/BunnyVisionPro
-12) https://github.com/unitreerobotics/unitree_sdk2_python
+このコードは以下のオープンソースコードを基にしています。各LICENSEはURLで確認してください:
+
+1. https://github.com/OpenTeleVision/TeleVision
+2. https://github.com/dexsuite/dex-retargeting
+3. https://github.com/vuer-ai/vuer
+4. https://github.com/stack-of-tasks/pinocchio
+5. https://github.com/casadi/casadi
+6. https://github.com/meshcat-dev/meshcat-python
+7. https://github.com/zeromq/pyzmq
+8. https://github.com/Dingry/BunnyVisionPro
+9. https://github.com/unitreerobotics/unitree_sdk2_python
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 448195a..315f3f4 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -1,11 +1,14 @@
-
avp_teleoperate
-
Unitree Robotics
+
xr_teleoperate
+
+
+
English | 中文 | 日本語
+
# 📺 视频演示
@@ -26,14 +29,29 @@
+# 🔖 发布说明
+
+1. 升级 [Vuer](https://github.com/vuer-ai/vuer) 库,扩展了设备支持模式。为更准确反映功能范围,项目由 **avp_teleoperate** 更名为 **xr_teleoperate**,从最初仅支持 Apple Vision Pro,扩展至兼容 Meta Quest 3(含手柄) 与 PICO 4 Ultra Enterprise(含手柄) 等多款 XR 设备。
+2. 对部分功能进行了**模块化**拆分,并通过 Git 子模块(git submodule)方式进行管理和加载,提升代码结构的清晰度与维护性。
+3. 新增**无头**、**运控**及**仿真**模式,优化启动参数配置(详见第2.2节),提升使用便捷性。**仿真**模式的加入,方便了环境验证和硬件故障排查。
+4. 将默认手部映射算法从 Vector 切换为 **DexPilot**,优化了指尖捏合的精度与交互体验。
+5. 其他一些优化。
# 0. 📖 介绍
-该仓库实现了使用 **XR设备**(比如 Apple Vision Pro、PICO 4 Ultra Enterprise 或 Meta Quest 3) 对 **宇树(Unitree)人形机器人** 的遥操作控制。
+该仓库实现了使用 **XR设备(Extended Reality)**(比如 Apple Vision Pro、PICO 4 Ultra Enterprise 或 Meta Quest 3 等) 对 **宇树(Unitree)人形机器人** 的遥操作控制。
+
+以下是系统示意图:
+
+
+
+
+
+
-以下是本仓库目前支持的机器人类型:
+以下是本仓库目前支持的设备类型:
+# 1. 📦 安装
-以下是需要的设备和接线示意图:
-
-
-
-
-
-
-
-以下是网络拓扑图,以G1机器人为例:
-
-
-
-
-
-
-
-
-
-# 1. 📦 前置条件
-
-我们在 Ubuntu 20.04 和 Ubuntu 22.04 上测试了我们的代码,其他操作系统可能需要不同的配置。
+我们在 Ubuntu 20.04 和 Ubuntu 22.04 上测试了我们的代码,其他操作系统可能需要不同的配置。本文档主要介绍常规模式。
有关更多信息,您可以参考 [官方文档](https://support.unitree.com/home/zh/Teleoperation) 和 [OpenTeleVision](https://github.com/OpenTeleVision/TeleVision)。
-## 1.1 🦾 逆运动学
+## 1.1 📥 基础环境
```bash
-unitree@Host:~$ conda create -n tv python=3.8
-unitree@Host:~$ conda activate tv
-# 如果您使用 `pip install`,请确保 pinocchio 版本为 3.1.0
-(tv) unitree@Host:~$ conda install pinocchio -c conda-forge
-(tv) unitree@Host:~$ pip install meshcat
-(tv) unitree@Host:~$ pip install casadi
+# 创建 conda 基础环境
+(base) unitree@Host:~$ conda create -n tv python=3.10 pinocchio=3.1.0 numpy=1.26.4 -c conda-forge
+(base) unitree@Host:~$ conda activate tv
+# 克隆本仓库
+(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/xr_teleoperate.git
+(tv) unitree@Host:~$ cd xr_teleoperate
+# 浅克隆子模块
+(tv) unitree@Host:~/xr_teleoperate$ git submodule update --init --depth 1
+# 安装 televuer 模块
+(tv) unitree@Host:~/xr_teleoperate$ cd teleop/televuer
+(tv) unitree@Host:~/xr_teleoperate/teleop/televuer$ pip install -e .
+# 生成 televuer 模块所需的证书文件
+(tv) unitree@Host:~/xr_teleoperate/teleop/televuer$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout key.pem -out cert.pem
+# 安装 dex-retargeting 模块
+(tv) unitree@Host:~/xr_teleoperate/teleop/televuer$ cd ../robot_control/dex-retargeting/
+(tv) unitree@Host:~/xr_teleoperate/teleop/robot_control/dex-retargeting$ pip install -e .
+# 安装本仓库所需的其他依赖库
+(tv) unitree@Host:~/xr_teleoperate/teleop/robot_control/dex-retargeting$ cd ../../../
+(tv) unitree@Host:~/xr_teleoperate$ pip install -r requirements.txt
```
-> 提醒:命令前面的所有标识符是为了提示:该命令应该在哪个设备和目录下执行。
->
-> p.s. 在 Ubuntu 系统 `~/.bashrc` 文件中,默认配置: `PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '`
-> - 以`(tv) unitree@Host:~$ pip install meshcat` 命令为例:
->
->- `(tv)` 表示 shell 此时位于 conda 创建的 tv 环境中;
->
->- `unitree@Host:~` 表示用户标识 unitree 在设备 Host 上登录,当前的工作目录为 `$HOME`;
->
->- $ 表示当前 shell 为 Bash;
->
->- pip install meshcat 是用户标识 unitree 要在 设备 Host 上执行的命令。
->
->您可以参考 [Harley Hahn's Guide to Unix and Linux](https://www.harley.com/unix-book/book/chapters/04.html#H) 和 [Conda User Guide](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) 来深入了解这些知识。
-
## 1.2 🕹️ unitree_sdk2_python
```bash
-# 安装 unitree_sdk2_python 库
+# 安装 unitree_sdk2_python 库,该库负责开发设备与机器人之间的通信控制功能
(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/unitree_sdk2_python.git
(tv) unitree@Host:~$ cd unitree_sdk2_python
(tv) unitree@Host:~$ pip install -e .
```
-> 提醒:原 h1_2 分支中的 [unitree_dds_wrapper](https://github.com/unitreerobotics/unitree_dds_wrapper) 为临时版本,现已全面转换到上述正式的 Python 版控制通信库:[unitree_sdk2_python](https://github.com/unitreerobotics/unitree_sdk2_python)
+> 注意1:原 h1_2 分支中的 [unitree_dds_wrapper](https://github.com/unitreerobotics/unitree_dds_wrapper) 为临时版本,现已全面转换到上述正式的 Python 版控制通信库:[unitree_sdk2_python](https://github.com/unitreerobotics/unitree_sdk2_python)
+
+> 注意2:命令前面的所有标识符是为了提示:该命令应该在哪个设备和目录下执行。
+>
+> p.s. 在 Ubuntu 系统 `~/.bashrc` 文件中,默认配置: `PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '`
+>
+> - 以`(tv) unitree@Host:~$ pip install meshcat` 命令为例:
+>
+> - `(tv)` 表示 shell 此时位于 conda 创建的 tv 环境中;
+>
+> - `unitree@Host:~` 表示用户标识 unitree 在设备 Host 上登录,当前的工作目录为 `$HOME`;
+>
+> - $ 表示当前 shell 为 Bash;
+>
+> - pip install meshcat 是用户标识 unitree 要在 设备 Host 上执行的命令。
+>
+> 您可以参考 [Harley Hahn's Guide to Unix and Linux](https://www.harley.com/unix-book/book/chapters/04.html#H) 和 [Conda User Guide](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) 来深入了解这些知识。
+
+# 2. 💻 仿真部署
-# 2. ⚙️ 配置
+## 2.1 📥 环境配置
-## 2.1 📥 基础
+首先,请安装 [unitree_sim_isaaclab](https://github.com/unitreerobotics/unitree_sim_isaaclab)。具体安装步骤,可参考该仓库 README 文档。
+
+其次,启动 unitree_sim_isaaclab 仿真环境。假设使用 G1(29 DoF) 和 Dex3 灵巧手配置进行仿真,则启动命令示例如下:
```bash
-(tv) unitree@Host:~$ cd ~
-(tv) unitree@Host:~$ git clone https://github.com/unitreerobotics/avp_teleoperate.git
-(tv) unitree@Host:~$ cd ~/avp_teleoperate
-(tv) unitree@Host:~$ pip install -r requirements.txt
+(base) unitree@Host:~$ conda activate unitree_sim_env
+(unitree_sim_env) unitree@Host:~$ cd ~/unitree_sim_isaaclab
+(unitree_sim_env) unitree@Host:~/unitree_sim_isaaclab$ python sim_main.py --device cpu --enable_cameras --task Isaac-PickPlace-Cylinder-G129-Dex3-Joint --enable_dex3_dds --robot_type g129
```
-## 2.2 🔌 本地流媒体
+仿真环境启动后,使用鼠标左键在窗口内点击一次以激活仿真运行状态。此时,终端内输出 `controller started, start main loop...`。
-**Apple Vision Pro**
+仿真界面如下图所示:
-苹果不允许在非 HTTPS 连接上使用 WebXR。要在本地测试应用程序,我们需要创建一个自签名证书并在客户端上安装它。您需要一台 Ubuntu 机器和一个路由器。将 Apple Vision Pro 和 Ubuntu **主机**连接到同一个路由器。
+
+
+
+
+
-1. 安装 mkcert:https://github.com/FiloSottile/mkcert
-2. 检查**主机**本地 IP 地址:
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ ifconfig | grep inet
-```
-假设 **主机** 的本地 IP 地址为 `192.168.123.2`
+## 2.2 🚀 启动遥操
-> 提醒:您可以使用 `ifconfig` 命令检查您的 **主机** IP 地址。
+本程序支持通过 XR 设备(比如手势或手柄)来控制实际机器人动作,也支持在虚拟仿真中运行。你可以根据需要,通过命令行参数来配置运行方式。
-3. 创建证书:
+以下是本程序的启动参数说明:
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ mkcert -install && mkcert -cert-file cert.pem -key-file key.pem 192.168.123.2 localhost 127.0.0.1
-```
+- 基础控制参数
-将生成的 `cert.pem` 和 `key.pem` 文件放在 `teleop` 目录中
+| ⚙️ 参数 | 📜 说明 | 🔘 目前可选值 | 📌 默认值 |
+| :---------: | :----------------------------------------------: | :------------------------------------------------------: | :------: |
+| `--xr-mode` | 选择 XR 输入模式(通过什么方式控制机器人) | `hand`(**手势跟踪**) `controller`(**手柄跟踪**) | `hand` |
+| `--arm` | 选择机器人设备类型(可参考 0. 📖 介绍) | `G1_29` `G1_23` `H1_2` `H1` | `G1_29` |
+| `--ee` | 选择手臂的末端执行器设备类型(可参考 0. 📖 介绍) | `dex1` `dex3` `inspire1` | 无默认值 |
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ cp cert.pem key.pem ~/avp_teleoperate/teleop/
-```
+- 模式开关参数
-4. 在服务器上打开防火墙:
+| ⚙️ 参数 | 📜 说明 |
+| :----------: | :----------------------------------------------------------: |
+| `--record` | 【启用**数据录制**模式】 按 **r** 键进入遥操后,按 **s** 键可开启数据录制,再次按 **s** 键可结束录制并保存本次 episode 数据。 继续按下 **s** 键可重复前述过程。 |
+| `--motion` | 【启用**运动控制**模式】 开启本模式后,可在机器人运控程序运行下进行遥操作程序。 **手势跟踪**模式下,可使用 [R3遥控器](https://www.unitree.com/cn/R3) 控制机器人正常行走;**手柄跟踪**模式下,也可使用[手柄摇杆控制机器人行走](https://github.com/unitreerobotics/xr_teleoperate/blob/375cdc27605de377c698e2b89cad0e5885724ca6/teleop/teleop_hand_and_arm.py#L247-L257)。 |
+| `--headless` | 【启用**无图形界面**模式】 适用于本程序部署在开发计算单元(PC2)等无显示器情况 |
+| `--sim` | 【启用[**仿真模式**](https://github.com/unitreerobotics/unitree_sim_isaaclab)】 |
-```bash
-(tv) unitree@Host:~/avp_teleoperate$ sudo ufw allow 8012
-```
+------
-5. 在 Apple Vision Pro 上安装 CA 证书:
+根据上述参数说明以及仿真环境配置,我们假设选择**手势跟踪**来控制 G1(29 DoF) + Dex3 灵巧手设备,同时开启仿真模式和数据录制模式。
+
+则启动命令如下所示:
```bash
-(tv) unitree@Host:~/avp_teleoperate$ mkcert -CAROOT
+(tv) unitree@Host:~$ cd ~/xr_teleoperate/teleop/
+(tv) unitree@Host:~/xr_teleoperate/teleop/$ python teleop_hand_and_arm.py --xr-mode=hand --arm=G1_29 --ee=dex3 --sim --record
+# 实际上,由于一些参数存在默认值,该命令也可简化为:
+(tv) unitree@Host:~/xr_teleoperate/teleop/$ python teleop_hand_and_arm.py --ee=dex3 --sim --record
```
-通过 AirDrop 将 `rootCA.pem` 复制到 Apple Vision Pro 并安装它。
+程序正常启动后,终端输出信息如下图所示:
-设置 > 通用 > 关于本机 > 证书信任设置。在“启用对根证书的完全信任”下,打开对证书的信任。
+
+
+
+
+
-设置 > 应用 > Safari > 高级 > 功能标志 > 启用 WebXR 相关功能。
+接下来,执行以下步骤:
-> 提醒:在新版本 Vision OS 2 系统中,该步骤有所不同:将证书通过 AirDrop 复制到 Apple Vision Pro 设备后,将会在设置 APP 中左上角账户栏的下方出现证书相关信息栏,点击进去即可启用对该证书的信任。
+1. 戴上您的 XR 头显设备(比如 Apple Vision Pro 或 PICO 4 Ultra Enterprise 等)
-------
+2. 连接对应的 WiFi 热点
-**2.2.2 PICO 4 Ultra Enterprise or Meta Quest 3**
+3. 打开浏览器应用(比如 Safari 或 PICO Browser),输入并访问网址:https://192.168.123.2:8012?ws=wss://192.168.123.2:8012
-我们已经尝试在 PICO 4 Ultra Enterprise 和 Meta-Quest 3 上使用手部追踪进行遥操作。
+ > 注意1:此 IP 地址应与您的 **主机** IP 地址匹配。该地址可以使用 `ifconfig` 等类似命令查询。
-PICO 4 Ultra Enterprise 的系统规格如下:
+ > 注意2:此时可能弹出下图所示的警告信息。请点击`Advanced`按钮后,继续点击 `Proceed to ip (unsafe)` 按钮,使用非安全方式继续登录服务器。
-> 系统版本:5.12.6.U;Android 版本号:14;软件版本号:c000_cf01_bv1.0.1_sv5.12.6_202412121344_sparrow_b4244_user
->
-> 浏览器版本:[4.0.28 beta version](https://github.com/vuer-ai/vuer/issues/45#issuecomment-2674918619)
+
+
+
+
+
+
+4. 进入`Vuer`网页界面后,点击 **`Virtual Reality`** 按钮。在允许后续的所有对话框后,启动 VR 会话。界面如下图所示:
+
+
+
+
+
+
+
+5. 此时,您将会在 XR 头显设备中看到机器人的第一人称视野。同时,终端打印出链接建立的信息:
+
+ ```bash
+ websocket is connected. id:dbb8537d-a58c-4c57-b49d-cbb91bd25b90
+ default socket worker is up, adding clientEvents
+ Uplink task running. id:dbb8537d-a58c-4c57-b49d-cbb91bd25b90
+ ```
+
+6. 然后,将双臂摆放至与**机器人初始姿态**相接近的姿势。这一步是为了避免在实物部署时,初始位姿差距过大导致机器人产生过大的摆动。
-Meta-Quest 3 的系统规格如下:
+ 机器人初始姿态示意图如下:
-> 系统版本:49829370066100510;版本:62.0.0.273.343.570372087;运行时版本:62.0.0.269.341.570372063;操作系统版本:SQ3A.220605.009.A1
+
+
+
+
+
-更多配置步骤信息,您可以查看该 [issue](https://github.com/unitreerobotics/avp_teleoperate/issues/32)。
+7. 最后,在终端中按下 **r** 键后,正式开启遥操作程序。此时,您可以远程控制机器人的手臂(和灵巧手)
-## 2.3 🔎 单元测试
+8. 在遥操过程中,按 **s** 键可开启数据录制,再次按 **s** 键可结束录制并保存数据(该过程可重复)
-此步骤用于验证环境是否正确安装。
+ 数据录制过程示意图如下:
-即将展现。
+
+
+
+
+
+> 注意1:录制的数据默认存储在 `xr_teleoperate/teleop/utils/data` 中。数据使用说明见此仓库: [unitree_IL_lerobot](https://github.com/unitreerobotics/unitree_IL_lerobot/blob/main/README_zh.md#%E6%95%B0%E6%8D%AE%E9%87%87%E9%9B%86%E4%B8%8E%E8%BD%AC%E6%8D%A2)。
+>
+> 注意2:请在录制数据时注意您的硬盘空间大小。
+
+## 2.3 🔚 退出
+
+要退出程序,可以在终端窗口(或 'record image' 窗口)中按下 **q** 键。
-# 3. 🚀 使用方法
-在开始此程序之前,请至少阅读一次 [官方文档](https://support.unitree.com/home/zh/Teleoperation)。
+# 3. 🤖 实物部署
-## 3.1 🖼️ 图像服务器
+实物部署与仿真部署步骤基本相似,下面将重点指出不同之处。
-将 `avp_teleoperate/teleop/image_server` 目录中的 `image_server.py` 复制到宇树机器人(G1/H1/H1_2 等)的 **开发计算单元 PC2**,并在 **PC2** 上执行以下命令:
+## 3.1 🖼️ 图像服务
+
+仿真环境中已经自动开启了图像服务。实物部署时,需要针对自身相机硬件类型,手动开启图像服务。步骤如下:
+
+将 `xr_teleoperate/teleop/image_server` 目录中的 `image_server.py` 复制到宇树机器人(G1/H1/H1_2 等)的 **开发计算单元 PC2**。
```bash
-# 提醒1:可以通过scp命令将image_server.py传输到PC2,然后使用ssh远程登录PC2后执行它。
+# 提醒:可以通过scp命令将image_server.py传输到PC2,然后使用ssh远程登录PC2后执行它。
# 假设开发计算单元PC2的ip地址为192.168.123.164,那么传输过程示例如下:
# 先ssh登录PC2,创建图像服务器的文件夹
(tv) unitree@Host:~$ ssh unitree@192.168.123.164 "mkdir -p ~/image_server"
# 将本地的image_server.py拷贝至PC2的~/image_server目录下
-(tv) unitree@Host:~$ scp ~/avp_teleoperate/teleop/image_server/image_server.py unitree@192.168.123.164:~/image_server/
+(tv) unitree@Host:~$ scp ~/xr_teleoperate/teleop/image_server/image_server.py unitree@192.168.123.164:~/image_server/
+```
-# 提醒2:目前该图像传输程序支持OpenCV和Realsense SDK两种读取图像的方式,请阅读image_server.py的ImageServer类的注释以便您根据自己的相机硬件来配置自己的图像传输服务。
+并在 **PC2** 上执行以下命令:
+
+```bash
+# 提醒:目前该图像传输程序支持OpenCV和Realsense SDK两种读取图像的方式,请阅读image_server.py的ImageServer类的注释以便您根据自己的相机硬件来配置自己的图像传输服务。
# 现在位于宇树机器人 PC2 终端
unitree@PC2:~/image_server$ python image_server.py
# 您可以看到终端输出如下:
@@ -249,14 +322,18 @@ unitree@PC2:~/image_server$ python image_server.py
在图像服务启动后,您可以在 **主机** 终端上使用 `image_client.py` 测试通信是否成功:
```bash
-(tv) unitree@Host:~/avp_teleoperate/teleop/image_server$ python image_client.py
+(tv) unitree@Host:~/xr_teleoperate/teleop/image_server$ python image_client.py
```
-## 3.2 ✋ Inspire 手部服务器(可选)
+## 3.2 ✋ Inspire 手部服务(可选)
-> 注意:如果选择的机器人配置中没有使用一代 Inspire 灵巧手,那么请忽略本节内容。
+> 注意1:如果选择的机器人配置中没有使用 Inspire 系列灵巧手,那么请忽略本节内容。
+>
+> 注意2:如果选择的 G1 机器人配置中使用了 [Inspire DFX 灵巧手](https://support.unitree.com/home/zh/G1_developer/inspire_dfx_dexterous_hand),那么请参考 [issue #46](https://github.com/unitreerobotics/xr_teleoperate/issues/46)。
+>
+> 注意3:如果选择的机器人配置中使用了 [Inspire FTP 灵巧手](https://support.unitree.com/home/zh/G1_developer/inspire_ftp_dexterity_hand),那么请参考 [issue #48](https://github.com/unitreerobotics/xr_teleoperate/issues/48)。
-您可以参考 [灵巧手开发](https://support.unitree.com/home/zh/H1_developer/Dexterous_hand) 配置相关环境并编译控制程序。首先,使用 [此链接](https://oss-global-cdn.unitree.com/static/0a8335f7498548d28412c31ea047d4be.zip) 下载灵巧手控制接口程序,然后将其复制到宇树机器人的**PC2**。
+您可以参考 [H1-DFX灵巧手开发](https://support.unitree.com/home/zh/H1_developer/Dexterous_hand) 配置相关环境并编译控制程序。首先,使用 [此链接](https://oss-global-cdn.unitree.com/static/0a8335f7498548d28412c31ea047d4be.zip) 下载灵巧手控制接口程序,然后将其复制到宇树机器人的**PC2**。
在宇树机器人的 **PC2** 上,执行命令:
@@ -274,107 +351,68 @@ unitree@PC2:~/h1_inspire_service/build$ ./h1_hand_example
如果两只手连续打开和关闭,则表示成功。一旦成功,即可关闭终端 2 中的 `./h1_hand_example` 程序。
-## 3.3 🚀 启动
+## 3.3 🚀 启动遥操
> 
>
> 1. 所有人员必须与机器人保持安全距离,以防止任何潜在的危险!
> 2. 在运行此程序之前,请确保至少阅读一次 [官方文档](https://support.unitree.com/home/zh/Teleoperation)。
-> 3. 请务必确保机器人已经进入[调试模式(L2+R2)](https://support.unitree.com/home/zh/H1_developer/Remote_control),以停止运动控制程序发送指令,这样可以避免潜在的指令冲突问题。
-
-最好有两名操作员来运行此程序,称为 **操作员 A** 和 **操作员 B**。
-
+> 3. 没有开启**运动控制**模式(`--motion`)时,请务必确保机器人已经进入 [调试模式(L2+R2)](https://support.unitree.com/home/zh/G1_developer/remote_control),以停止运动控制程序发送指令,这样可以避免潜在的指令冲突问题。
+> 4. 如果要开启**运动控制**模式遥操作,请提前使用 [R3遥控器](https://www.unitree.com/cn/R3) 确保机器人进入主运控模式。
+> 5. 开启**运动控制**模式(`--motion`)时:
+> - 右手柄按键 `A` 为遥操作**退出**功能按键;
+> - 左手柄和右手柄的两个摇杆按键同时按下为软急停按键,机器人会退出运控程序并进入阻尼模式,该功能只在必要情况下使用
+> - 左手柄摇杆控制机器人前后左右(最大控制速度已经在程序中进行了限制)
+> - 右手柄摇杆控制机器人转向(最大控制速度已经在程序中进行了限制)
-
-首先,**操作员 B** 需要执行以下步骤:
-
-1. 修改 `~/avp_teleoperate/teleop/teleop_hand_and_arm.py` 中 `if __name__ == '__main__':` 代码下方的 `img_config` 图像客户端配置,它应该与 3.1 节中您在 PC2 配置的图像服务器参数相同。
-
-2. 根据您的机器人配置选择不同的启动参数。以下是一些启动命令示例:
-
- ```bash
- # 1. G1(29DoF)机器人 + Dex3-1 灵巧手 (实际上,G1_29是--arm的默认参数,可以选择不填写)
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=G1_29 --hand=dex3
-
- # 2. 仅G1(29DoF)机器人
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py
-
- # 3. G1 (23DoF) 机器人
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=G1_23
-
- # 4. H1_2 机器人 + 一代 Inspire 灵巧手
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=H1_2 --hand=inspire1
-
- # 5. H1 机器人
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=H1
-
- # 6. 如果您想开启数据可视化+录制,还可以追加 --record 选项
- (tv) unitree@Host:~/avp_teleoperate/teleop$ python teleop_hand_and_arm.py --arm=G1_23 --record
- ```
-
-3. 程序如果正常启动,终端最后一行将停留在 “Please enter the start signal (enter 'r' to start the subsequent program):” 的字符串输出。
-
-
-
-然后,**操作员 A** 需要执行以下步骤:
-
-1. 戴上您的 Apple Vision Pro 设备。
-
-2. 在 Apple Vision Pro 上打开 Safari,访问:https://192.168.123.2:8012?ws=wss://192.168.123.2:8012
-
- > 注意:此 IP 地址应与您的 **主机** IP 地址匹配。
-
-3. 点击 `Enter VR` 并选择 `Allow` 以启动 VR 会话。
-
-4. 您将会在Apple Vision Pro中看到机器人的第一人称视野。
-
-
-
-接下来,**操作员 B** 可以在终端中按下 **r** 键以启动远程操作程序。
-
-此时,**操作员 A** 可以远程控制机器人的手臂(和灵巧手)。
-
-如果使用了`--record`参数,那么**操作员 B** 可以在打开的“record image”窗口中按 **s** 键开始录制数据,再次按 **s** 键停止。可以根据需要重复此操作进行多次录制。
-
-> 注意1:录制的数据默认存储在 `avp_teleoperate/teleop/utils/data` 中,使用说明见此仓库: [unitree_IL_lerobot](https://github.com/unitreerobotics/unitree_IL_lerobot/blob/main/README_zh.md#%E6%95%B0%E6%8D%AE%E9%87%87%E9%9B%86%E4%B8%8E%E8%BD%AC%E6%8D%A2)。
->
-> 注意2:请在录制数据时注意您的硬盘空间大小。
+与仿真部署基本一致,但要注意上述警告事项。
## 3.4 🔚 退出
> 
>
-> 为了避免损坏机器人,最好确保**操作员 A** 将机器人手臂摆放为自然下垂或其他恰当位置后,**操作员B **再按 **q** 退出。
+> 为了避免损坏机器人,最好确保将机器人手臂摆放为与机器人初始姿态附近的恰当位置后,再按 **q** 退出。
+>
+> 调试模式下:按下退出键后,机器人双臂将在5秒内返回机器人初始姿态,然后结束控制。
+>
+> 运控模式下:按下退出键后,机器人双臂将在5秒内返回机器人运控姿态,然后结束控制。
-要退出程序,**操作员 B** 可以在 'record image' 窗口中按下 **q** 键。
+与仿真部署基本一致,但要注意上述警告事项。
# 4. 🗺️ 代码库教程
```
-avp_teleoperate/
+xr_teleoperate/
│
├── assets [存储机器人 URDF 相关文件]
│
+├── hardware [存储 3D 打印模组]
+│
├── teleop
│ ├── image_server
│ │ ├── image_client.py [用于从机器人图像服务器接收图像数据]
-│ │ ├── image_server.py [从摄像头捕获图像并通过网络发送(在机器人板载计算单元上运行)]
+│ │ ├── image_server.py [从摄像头捕获图像并通过网络发送(在机器人板载计算单元PC2上运行)]
│ │
-│ ├── open_television
-│ │ ├── television.py [使用 Vuer 从 Apple Vision Pro 捕获腕部和手部数据]
-│ │ ├── tv_wrapper.py [对捕获的数据进行后处理]
+│ ├── televuer
+│ │ ├── src/televuer
+│ │ ├── television.py [使用 Vuer 从 XR 设备捕获头部、腕部和手部/手柄等数据]
+│ │ ├── tv_wrapper.py [对捕获的数据进行后处理]
+│ │ ├── test
+│ │ ├── _test_television.py [television.py 的测试程序]
+│ │ ├── _test_tv_wrapper.py [tv_wrapper.py 的测试程序]
│ │
│ ├── robot_control
+│ │ ├── src/dex-retargeting [灵巧手映射算法库]
│ │ ├── robot_arm_ik.py [手臂的逆运动学]
│ │ ├── robot_arm.py [控制双臂关节并锁定其他部分]
+│ │ ├── hand_retargeting.py [灵巧手映射算法库 Wrapper]
│ │ ├── robot_hand_inspire.py [控制因时灵巧手]
│ │ ├── robot_hand_unitree.py [控制宇树灵巧手]
│ │
│ ├── utils
│ │ ├── episode_writer.py [用于记录模仿学习的数据]
-│ │ ├── mat_tool.py [一些小的数学工具]
│ │ ├── weighted_moving_filter.py [用于过滤关节数据的滤波器]
│ │ ├── rerun_visualizer.py [用于可视化录制数据]
│ │
@@ -387,23 +425,23 @@ avp_teleoperate/
## 5.1 📋 清单
-| 项目 | 数量 | 链接 | 备注 |
-| :-----------------------: | :--: | :----------------------------------------------------------: | :----------------------------: |
-| **宇树通用人形机器人 G1** | 1 | https://www.unitree.com/cn/g1 | 需选配开发计算单元版本 |
-| **Apple Vision Pro** | 1 | https://www.apple.com.cn/apple-vision-pro/ | |
-| **路由器** | 1 | | |
-| **用户电脑** | 1 | | 推荐显卡性能在RTX 4080 以上 |
-| **头部双目相机** | 1 | [仅供参考] http://e.tb.cn/h.TaZxgkpfWkNCakg?tk=KKz03Kyu04u | 用于机器人头部视野,视场角130° |
-| **头部相机支架** | 1 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/head_stereo_camera_mount.STEP | 用于装配头部相机 |
-| 英特尔 RealSense D405相机 | 2 | https://www.intelrealsense.com/depth-camera-d405/ | 用于腕部灵巧操作视野 |
-| 腕部相机环形支架 | 2 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/wrist_ring_mount.STEP | 与腕部相机支架搭配使用 |
-| 左腕相机支架 | 1 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/left_wrist_D405_camera_mount.STEP | 用于装配左腕D405相机 |
-| 右腕相机支架 | 1 | https://github.com/unitreerobotics/avp_teleoperate/blob/g1/hardware/right_wrist_D405_camera_mount.STEP | 用于装配右腕D405相机 |
-| M3-1 六角螺母 | 4 | [仅供参考] https://a.co/d/gQaLtHD | 用于腕部紧固件 |
-| M3x12 螺钉 | 4 | [仅供参考] https://amzn.asia/d/aU9NHSf | 用于腕部紧固件 |
-| M3x6 螺钉 | 4 | [仅供参考] https://amzn.asia/d/0nEz5dJ | 用于腕部紧固件 |
-| **M4x14 螺钉** | 2 | [仅供参考] https://amzn.asia/d/cfta55x | 用于头部紧固件 |
-| **M2x4 自攻螺钉** | 4 | [仅供参考] https://amzn.asia/d/1msRa5B | 用于头部紧固件 |
+| 项目 | 数量 | 链接 | 备注 |
+| :-----------------------: | :--: | :----------------------------------------------------------: | :----------------------------------------------------------: |
+| **宇树通用人形机器人 G1** | 1 | https://www.unitree.com/cn/g1 | 需选配开发计算单元版本 |
+| **XR 设备** | 1 | https://www.apple.com.cn/apple-vision-pro/ https://www.meta.com/quest/quest-3 https://www.picoxr.com/products/pico4-ultra-enterprise | |
+| 路由器 | 1 | | 常规模式必须,无线模式不需要 |
+| **用户电脑** | 1 | | 仿真模式下请使用[官方推荐](https://docs.isaacsim.omniverse.nvidia.com/4.5.0/installation/requirements.html)的硬件资源进行部署使用 |
+| **头部双目相机** | 1 | [仅供参考] http://e.tb.cn/h.TaZxgkpfWkNCakg?tk=KKz03Kyu04u | 用于机器人头部视野 |
+| **头部相机支架** | 1 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/head_stereo_camera_mount.STEP | 用于装配头部相机 |
+| 英特尔 RealSense D405相机 | 2 | https://www.intelrealsense.com/depth-camera-d405/ | 用于腕部灵巧操作视野 |
+| 腕部相机环形支架 | 2 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/wrist_ring_mount.STEP | 与腕部相机支架搭配使用 |
+| 左腕相机支架 | 1 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/left_wrist_D405_camera_mount.STEP | 用于装配左腕D405相机 |
+| 右腕相机支架 | 1 | https://github.com/unitreerobotics/xr_teleoperate/blob/g1/hardware/right_wrist_D405_camera_mount.STEP | 用于装配右腕D405相机 |
+| M3-1 六角螺母 | 4 | [仅供参考] https://a.co/d/gQaLtHD | 用于腕部紧固件 |
+| M3x12 螺钉 | 4 | [仅供参考] https://amzn.asia/d/aU9NHSf | 用于腕部紧固件 |
+| M3x6 螺钉 | 4 | [仅供参考] https://amzn.asia/d/0nEz5dJ | 用于腕部紧固件 |
+| **M4x14 螺钉** | 2 | [仅供参考] https://amzn.asia/d/cfta55x | 用于头部紧固件 |
+| **M2x4 自攻螺钉** | 4 | [仅供参考] https://amzn.asia/d/1msRa5B | 用于头部紧固件 |
> 注意:加粗项目是进行遥操作任务时的必需设备,其余项目是录制[数据集](https://huggingface.co/unitreerobotics)时的可选设备。
@@ -475,8 +513,5 @@ avp_teleoperate/
5. https://github.com/casadi/casadi
6. https://github.com/meshcat-dev/meshcat-python
7. https://github.com/zeromq/pyzmq
-8. https://github.com/unitreerobotics/unitree_dds_wrapper
-9. https://github.com/tonyzhaozh/act
-10. https://github.com/facebookresearch/detr
-11. https://github.com/Dingry/BunnyVisionPro
-12. https://github.com/unitreerobotics/unitree_sdk2_python
+8. https://github.com/Dingry/BunnyVisionPro
+9. https://github.com/unitreerobotics/unitree_sdk2_python
diff --git a/requirements.txt b/requirements.txt
index c5d9c66..ba96d60 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,20 +1,4 @@
-einops==0.8.0
-h5py==3.11.0
-ipython==8.12.3
matplotlib==3.7.5
-packaging==24.1
-pandas==2.0.3
-params_proto==2.12.1
-pytransform3d==3.5.0
-PyYAML==6.0.1
-scikit_learn==1.3.2
-scipy==1.10.1
-seaborn==0.13.2
-setuptools==69.5.1
-torch==2.3.0
-torchvision==0.18.0
rerun-sdk==0.20.1
-nlopt>=2.6.1,<2.8.0
-trimesh>=4.4.0
-anytree>=2.12.0
-logging-mp==0.1.5
+meshcat==0.3.2
+sshkeyboard==2.3.1
\ No newline at end of file
diff --git a/teleop/open_television/__init__.py b/teleop/open_television/__init__.py
deleted file mode 100644
index 200cce1..0000000
--- a/teleop/open_television/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# open_television/__init__.py
-# openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout key.pem -out cert.pem
-from .television import TeleVision
-from .tv_wrapper import TeleVisionWrapper, TeleData, TeleStateData
\ No newline at end of file
diff --git a/teleop/open_television/_test_television.py b/teleop/open_television/_test_television.py
deleted file mode 100644
index c1ba588..0000000
--- a/teleop/open_television/_test_television.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import os, sys
-this_file = os.path.abspath(__file__)
-project_root = os.path.abspath(os.path.join(os.path.dirname(this_file), '..'))
-if project_root not in sys.path:
- sys.path.insert(0, project_root)
-
-import time
-import numpy as np
-from multiprocessing import shared_memory
-from open_television import TeleVision
-import logging_mp
-logger_mp = logging_mp.get_logger(__name__, level=logging_mp.INFO)
-
-def run_test_television():
- # image
- image_shape = (480, 640 * 2, 3)
- image_shm = shared_memory.SharedMemory(create=True, size=np.prod(image_shape) * np.uint8().itemsize)
- image_array = np.ndarray(image_shape, dtype=np.uint8, buffer=image_shm.buf)
-
- # from image_server.image_client import ImageClient
- # import threading
- # image_client = ImageClient(tv_img_shape = image_shape, tv_img_shm_name = image_shm.name, image_show=True, server_address="127.0.0.1")
- # image_receive_thread = threading.Thread(target = image_client.receive_process, daemon = True)
- # image_receive_thread.daemon = True
- # image_receive_thread.start()
-
- # television
- use_hand_track = True
- tv = TeleVision(binocular = True, use_hand_tracking = use_hand_track, img_shape = image_shape, img_shm_name = image_shm.name, webrtc=False)
-
- try:
- input("Press Enter to start television test...")
- running = True
- while running:
- logger_mp.info("=" * 80)
- logger_mp.info("Common Data (always available):")
- logger_mp.info(f"head_pose shape: {tv.head_pose.shape}\n{tv.head_pose}\n")
- logger_mp.info(f"left_arm_pose shape: {tv.left_arm_pose.shape}\n{tv.left_arm_pose}\n")
- logger_mp.info(f"right_arm_pose shape: {tv.right_arm_pose.shape}\n{tv.right_arm_pose}\n")
- logger_mp.info("=" * 80)
-
- if use_hand_track:
- logger_mp.info("Hand Tracking Data:")
- logger_mp.info(f"left_hand_positions shape: {tv.left_hand_positions.shape}\n{tv.left_hand_positions}\n")
- logger_mp.info(f"right_hand_positions shape: {tv.right_hand_positions.shape}\n{tv.right_hand_positions}\n")
- logger_mp.info(f"left_hand_orientations shape: {tv.left_hand_orientations.shape}\n{tv.left_hand_orientations}\n")
- logger_mp.info(f"right_hand_orientations shape: {tv.right_hand_orientations.shape}\n{tv.right_hand_orientations}\n")
- logger_mp.info(f"left_hand_pinch_state: {tv.left_hand_pinch_state}")
- logger_mp.info(f"left_hand_pinch_value: {tv.left_hand_pinch_value}")
- logger_mp.info(f"left_hand_squeeze_state: {tv.left_hand_squeeze_state}")
- logger_mp.info(f"left_hand_squeeze_value: {tv.left_hand_squeeze_value}")
- logger_mp.info(f"right_hand_pinch_state: {tv.right_hand_pinch_state}")
- logger_mp.info(f"right_hand_pinch_value: {tv.right_hand_pinch_value}")
- logger_mp.info(f"right_hand_squeeze_state: {tv.right_hand_squeeze_state}")
- logger_mp.info(f"right_hand_squeeze_value: {tv.right_hand_squeeze_value}")
- else:
- logger_mp.info("Controller Data:")
- logger_mp.info(f"left_controller_trigger_state: {tv.left_controller_trigger_state}")
- logger_mp.info(f"left_controller_trigger_value: {tv.left_controller_trigger_value}")
- logger_mp.info(f"left_controller_squeeze_state: {tv.left_controller_squeeze_state}")
- logger_mp.info(f"left_controller_squeeze_value: {tv.left_controller_squeeze_value}")
- logger_mp.info(f"left_controller_thumbstick_state: {tv.left_controller_thumbstick_state}")
- logger_mp.info(f"left_controller_thumbstick_value: {tv.left_controller_thumbstick_value}")
- logger_mp.info(f"left_controller_aButton: {tv.left_controller_aButton}")
- logger_mp.info(f"left_controller_bButton: {tv.left_controller_bButton}")
- logger_mp.info(f"right_controller_trigger_state: {tv.right_controller_trigger_state}")
- logger_mp.info(f"right_controller_trigger_value: {tv.right_controller_trigger_value}")
- logger_mp.info(f"right_controller_squeeze_state: {tv.right_controller_squeeze_state}")
- logger_mp.info(f"right_controller_squeeze_value: {tv.right_controller_squeeze_value}")
- logger_mp.info(f"right_controller_thumbstick_state: {tv.right_controller_thumbstick_state}")
- logger_mp.info(f"right_controller_thumbstick_value: {tv.right_controller_thumbstick_value}")
- logger_mp.info(f"right_controller_aButton: {tv.right_controller_aButton}")
- logger_mp.info(f"right_controller_bButton: {tv.right_controller_bButton}")
- logger_mp.info("=" * 80)
- time.sleep(0.03)
- except KeyboardInterrupt:
- running = False
- logger_mp.warning("KeyboardInterrupt, exiting program...")
- finally:
- image_shm.unlink()
- image_shm.close()
- logger_mp.warning("Finally, exiting program...")
- exit(0)
-
-if __name__ == '__main__':
- run_test_television()
\ No newline at end of file
diff --git a/teleop/open_television/_test_tv_wrapper.py b/teleop/open_television/_test_tv_wrapper.py
deleted file mode 100644
index ab8579e..0000000
--- a/teleop/open_television/_test_tv_wrapper.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import os, sys
-this_file = os.path.abspath(__file__)
-project_root = os.path.abspath(os.path.join(os.path.dirname(this_file), '..'))
-if project_root not in sys.path:
- sys.path.insert(0, project_root)
-
-import numpy as np
-import time
-from multiprocessing import shared_memory
-from open_television import TeleVisionWrapper
-import logging_mp
-logger_mp = logging_mp.get_logger(__name__, level=logging_mp.INFO)
-
-
-def run_test_tv_wrapper():
- image_shape = (480, 640 * 2, 3)
- image_shm = shared_memory.SharedMemory(create=True, size=np.prod(image_shape) * np.uint8().itemsize)
- image_array = np.ndarray(image_shape, dtype=np.uint8, buffer=image_shm.buf)
-
- # from image_server.image_client import ImageClient
- # import threading
- # image_client = ImageClient(tv_img_shape = image_shape, tv_img_shm_name = image_shm.name, image_show=True, server_address="127.0.0.1")
- # image_receive_thread = threading.Thread(target = image_client.receive_process, daemon = True)
- # image_receive_thread.daemon = True
- # image_receive_thread.start()
-
- use_hand_track=True
- tv_wrapper = TeleVisionWrapper(binocular=True, use_hand_tracking=use_hand_track, img_shape=image_shape, img_shm_name=image_shm.name,
- return_state_data=True, return_hand_rot_data = True)
- try:
- input("Press Enter to start tv_wrapper test...")
- running = True
- while running:
- start_time = time.time()
- teleData = tv_wrapper.get_motion_state_data()
-
- logger_mp.info("=== TeleData Snapshot ===")
- logger_mp.info(f"[Head Rotation Matrix]:\n{teleData.head_pose}")
- logger_mp.info(f"[Left EE Pose]:\n{teleData.left_arm_pose}")
- logger_mp.info(f"[Right EE Pose]:\n{teleData.right_arm_pose}")
-
- if use_hand_track:
- logger_mp.info(f"[Left Hand Position] shape {teleData.left_hand_pos.shape}:\n{teleData.left_hand_pos}")
- logger_mp.info(f"[Right Hand Position] shape {teleData.right_hand_pos.shape}:\n{teleData.right_hand_pos}")
-
- if teleData.left_hand_rot is not None:
- logger_mp.info(f"[Left Hand Rotation] shape {teleData.left_hand_rot.shape}:\n{teleData.left_hand_rot}")
- if teleData.right_hand_rot is not None:
- logger_mp.info(f"[Right Hand Rotation] shape {teleData.right_hand_rot.shape}:\n{teleData.right_hand_rot}")
-
- if teleData.left_pinch_value is not None:
- logger_mp.info(f"[Left Pinch Value]: {teleData.left_pinch_value:.2f}")
- if teleData.right_pinch_value is not None:
- logger_mp.info(f"[Right Pinch Value]: {teleData.right_pinch_value:.2f}")
-
- if teleData.tele_state:
- state = teleData.tele_state
- logger_mp.info("[Hand State]:")
- logger_mp.info(f" Left Pinch state: {state.left_pinch_state}")
- logger_mp.info(f" Left Squeeze: {state.left_squeeze_state} ({state.left_squeeze_value:.2f})")
- logger_mp.info(f" Right Pinch state: {state.right_pinch_state}")
- logger_mp.info(f" Right Squeeze: {state.right_squeeze_state} ({state.right_squeeze_value:.2f})")
- else:
- logger_mp.info(f"[Left Trigger Value]: {teleData.left_trigger_value:.2f}")
- logger_mp.info(f"[Right Trigger Value]: {teleData.right_trigger_value:.2f}")
-
- if teleData.tele_state:
- state = teleData.tele_state
- logger_mp.info("[Controller State]:")
- logger_mp.info(f" Left Trigger: {state.left_trigger_state}")
- logger_mp.info(f" Left Squeeze: {state.left_squeeze_ctrl_state} ({state.left_squeeze_ctrl_value:.2f})")
- logger_mp.info(f" Left Thumbstick: {state.left_thumbstick_state} ({state.left_thumbstick_value})")
- logger_mp.info(f" Left A/B Buttons: A={state.left_aButton}, B={state.left_bButton}")
- logger_mp.info(f" Right Trigger: {state.right_trigger_state}")
- logger_mp.info(f" Right Squeeze: {state.right_squeeze_ctrl_state} ({state.right_squeeze_ctrl_value:.2f})")
- logger_mp.info(f" Right Thumbstick: {state.right_thumbstick_state} ({state.right_thumbstick_value})")
- logger_mp.info(f" Right A/B Buttons: A={state.right_aButton}, B={state.right_bButton}")
-
- current_time = time.time()
- time_elapsed = current_time - start_time
- sleep_time = max(0, 0.033 - time_elapsed)
- time.sleep(sleep_time)
- logger_mp.debug(f"main process sleep: {sleep_time}")
-
- except KeyboardInterrupt:
- running = False
- logger_mp.warning("KeyboardInterrupt, exiting program...")
- finally:
- image_shm.unlink()
- image_shm.close()
- logger_mp.warning("Finally, exiting program...")
- exit(0)
-
-if __name__ == '__main__':
- run_test_tv_wrapper()
\ No newline at end of file
diff --git a/teleop/open_television/setup.py b/teleop/open_television/setup.py
deleted file mode 100644
index dc471af..0000000
--- a/teleop/open_television/setup.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from setuptools import setup, find_packages
-
-setup(
- name='open_television',
- version='0.0.1',
- description='XR vision and hand/controller interface for unitree robotics',
- author='silencht',
- packages=find_packages(),
- install_requires=[
- 'numpy==1.23.0',
- 'opencv_contrib_python==4.10.0.82',
- 'opencv_python==4.9.0.80',
- 'aiohttp==3.9.5',
- 'aiohttp_cors==0.7.0',
- 'aiortc==1.8.0',
- 'av==11.0.0',
- # 'vuer==0.0.32rc7', # avp \ pico, hand tracking all work fine. but it not support controller tracking.
-
- # 'vuer==0.0.35', # avp hand tracking works fine. pico is fine too, but the right eye occasionally goes black for a short time at start.
-
- # from 'vuer==0.0.36rc1', # avp hand tracking only shows a flat RGB image — there's no stereo view. Pico (hand / controller tracking) is the same,
- # and sometimes the right eye occasionally goes black for a short time at start.
- # Both avp / pico can display the hand or controller marker, which looks like a black box.
-
- # to 'vuer==0.0.42rc16', # avp hand tracking only shows a flat RGB image — there's no stereo view. Pico (hand / controller tracking) is the same,
- # and sometimes the right eye occasionally goes black for a short time at start.
- # Both avp / pico can display the hand or controller marker, which looks like RGB three-axis color coordinates.
-
- # from 'vuer==0.0.42', # avp hand tracking only shows a flat RGB image — there's no stereo view.
- # to 'vuer==0.0.45', pico hand tracking also only shows a flat RGB image — there's no stereo view. Unable to retrieve hand tracking data.
- # pico controller tracking also only shows a flat RGB image — there's no stereo view. Controller data can be retrieved.
-
- # from 'vuer==0.0.46' # avp hand tracking work fine.
- # to
- # 'vuer==0.0.56', # In pico hand tracking mode, using a hand gesture to click the "Virtual Reality" button
- # causes the screen to stay black and stuck loading. But if the button is clicked with the controller instead, everything works fine.
- # In pico controller tracking mode, using controller to click the "Virtual Reality" button
- # causes the screen to stay black and stuck loading. But if the button is clicked with a hand gesture instead, everything works fine.
- # avp \ pico all have hand marker visualization (RGB three-axis color coordinates).
-
- 'vuer==0.0.60', # a good version
- # https://github.com/unitreerobotics/avp_teleoperate/issues/53
- # https://github.com/vuer-ai/vuer/issues/45
- # https://github.com/vuer-ai/vuer/issues/65
-
- ],
- python_requires='>=3.8',
-)
\ No newline at end of file
diff --git a/teleop/open_television/television.py b/teleop/open_television/television.py
deleted file mode 100644
index aa5c527..0000000
--- a/teleop/open_television/television.py
+++ /dev/null
@@ -1,514 +0,0 @@
-from vuer import Vuer
-from vuer.schemas import ImageBackground, Hands, MotionControllers, WebRTCVideoPlane, WebRTCStereoVideoPlane
-from multiprocessing import Value, Array, Process, shared_memory
-import numpy as np
-import asyncio
-import cv2
-import os
-
-
-class TeleVision:
- def __init__(self, binocular: bool, use_hand_tracking: bool, img_shape, img_shm_name, cert_file=None, key_file=None, ngrok=False, webrtc=False):
- """
- TeleVision class for OpenXR-based VR/AR applications.
- This class handles the communication with the Vuer server and manages the shared memory for image and pose data.
-
- :param binocular: bool, whether the application is binocular (stereoscopic) or monocular.
- :param use_hand_tracking: bool, whether to use hand tracking or controller tracking.
- :param img_shape: tuple, shape of the image (height, width, channels).
- :param img_shm_name: str, name of the shared memory for the image.
- :param cert_file: str, path to the SSL certificate file.
- :param key_file: str, path to the SSL key file.
- :param ngrok: bool, whether to use ngrok for tunneling.
- """
- self.binocular = binocular
- self.use_hand_tracking = use_hand_tracking
- self.img_height = img_shape[0]
- if self.binocular:
- self.img_width = img_shape[1] // 2
- else:
- self.img_width = img_shape[1]
-
- current_module_dir = os.path.dirname(os.path.abspath(__file__))
- if cert_file is None:
- cert_file = os.path.join(current_module_dir, "cert.pem")
- if key_file is None:
- key_file = os.path.join(current_module_dir, "key.pem")
-
- if ngrok:
- self.vuer = Vuer(host='0.0.0.0', queries=dict(grid=False), queue_len=3)
- else:
- self.vuer = Vuer(host='0.0.0.0', cert=cert_file, key=key_file, queries=dict(grid=False), queue_len=3)
-
- self.vuer.add_handler("CAMERA_MOVE")(self.on_cam_move)
- if self.use_hand_tracking:
- self.vuer.add_handler("HAND_MOVE")(self.on_hand_move)
- else:
- self.vuer.add_handler("CONTROLLER_MOVE")(self.on_controller_move)
-
- existing_shm = shared_memory.SharedMemory(name=img_shm_name)
- self.img_array = np.ndarray(img_shape, dtype=np.uint8, buffer=existing_shm.buf)
-
- self.webrtc = webrtc
- if self.binocular and not self.webrtc:
- self.vuer.spawn(start=False)(self.main_image_binocular)
- elif not self.binocular and not self.webrtc:
- self.vuer.spawn(start=False)(self.main_image_monocular)
- elif self.webrtc:
- self.vuer.spawn(start=False)(self.main_image_webrtc)
-
- self.head_pose_shared = Array('d', 16, lock=True)
- self.left_arm_pose_shared = Array('d', 16, lock=True)
- self.right_arm_pose_shared = Array('d', 16, lock=True)
- if self.use_hand_tracking:
- self.left_hand_position_shared = Array('d', 75, lock=True)
- self.right_hand_position_shared = Array('d', 75, lock=True)
- self.left_hand_orientation_shared = Array('d', 25 * 9, lock=True)
- self.right_hand_orientation_shared = Array('d', 25 * 9, lock=True)
-
- self.left_pinch_state_shared = Value('b', False, lock=True)
- self.left_pinch_value_shared = Value('d', 0.0, lock=True)
- self.left_squeeze_state_shared = Value('b', False, lock=True)
- self.left_squeeze_value_shared = Value('d', 0.0, lock=True)
-
- self.right_pinch_state_shared = Value('b', False, lock=True)
- self.right_pinch_value_shared = Value('d', 0.0, lock=True)
- self.right_squeeze_state_shared = Value('b', False, lock=True)
- self.right_squeeze_value_shared = Value('d', 0.0, lock=True)
- else:
- self.left_trigger_state_shared = Value('b', False, lock=True)
- self.left_trigger_value_shared = Value('d', 0.0, lock=True)
- self.left_squeeze_state_shared = Value('b', False, lock=True)
- self.left_squeeze_value_shared = Value('d', 0.0, lock=True)
- self.left_thumbstick_state_shared = Value('b', False, lock=True)
- self.left_thumbstick_value_shared = Array('d', 2, lock=True)
- self.left_aButton_shared = Value('b', False, lock=True)
- self.left_bButton_shared = Value('b', False, lock=True)
-
- self.right_trigger_state_shared = Value('b', False, lock=True)
- self.right_trigger_value_shared = Value('d', 0.0, lock=True)
- self.right_squeeze_state_shared = Value('b', False, lock=True)
- self.right_squeeze_value_shared = Value('d', 0.0, lock=True)
- self.right_thumbstick_state_shared = Value('b', False, lock=True)
- self.right_thumbstick_value_shared = Array('d', 2, lock=True)
- self.right_aButton_shared = Value('b', False, lock=True)
- self.right_bButton_shared = Value('b', False, lock=True)
-
- self.process = Process(target=self.vuer_run)
- self.process.daemon = True
- self.process.start()
-
- def vuer_run(self):
- self.vuer.run()
-
- async def on_cam_move(self, event, session, fps=60):
- try:
- with self.head_pose_shared.get_lock():
- self.head_pose_shared[:] = event.value["camera"]["matrix"]
- except:
- pass
-
- async def on_controller_move(self, event, session, fps=60):
- try:
- with self.left_arm_pose_shared.get_lock():
- self.left_arm_pose_shared[:] = event.value["left"]
- with self.right_arm_pose_shared.get_lock():
- self.right_arm_pose_shared[:] = event.value["right"]
-
- left_controller_state = event.value["leftState"]
- right_controller_state = event.value["rightState"]
-
- def extract_controller_states(state_dict, prefix):
- # trigger
- with getattr(self, f"{prefix}_trigger_state_shared").get_lock():
- getattr(self, f"{prefix}_trigger_state_shared").value = bool(state_dict.get("trigger", False))
- with getattr(self, f"{prefix}_trigger_value_shared").get_lock():
- getattr(self, f"{prefix}_trigger_value_shared").value = float(state_dict.get("triggerValue", 0.0))
- # squeeze
- with getattr(self, f"{prefix}_squeeze_state_shared").get_lock():
- getattr(self, f"{prefix}_squeeze_state_shared").value = bool(state_dict.get("squeeze", False))
- with getattr(self, f"{prefix}_squeeze_value_shared").get_lock():
- getattr(self, f"{prefix}_squeeze_value_shared").value = float(state_dict.get("squeezeValue", 0.0))
- # thumbstick
- with getattr(self, f"{prefix}_thumbstick_state_shared").get_lock():
- getattr(self, f"{prefix}_thumbstick_state_shared").value = bool(state_dict.get("thumbstick", False))
- with getattr(self, f"{prefix}_thumbstick_value_shared").get_lock():
- getattr(self, f"{prefix}_thumbstick_value_shared")[:] = state_dict.get("thumbstickValue", [0.0, 0.0])
- # buttons
- with getattr(self, f"{prefix}_aButton_shared").get_lock():
- getattr(self, f"{prefix}_aButton_shared").value = bool(state_dict.get("aButton", False))
- with getattr(self, f"{prefix}_bButton_shared").get_lock():
- getattr(self, f"{prefix}_bButton_shared").value = bool(state_dict.get("bButton", False))
-
- extract_controller_states(left_controller_state, "left")
- extract_controller_states(right_controller_state, "right")
- except:
- pass
-
- async def on_hand_move(self, event, session, fps=60):
- try:
- left_hand_data = event.value["left"]
- right_hand_data = event.value["right"]
- left_hand_state = event.value["leftState"]
- right_hand_state = event.value["rightState"]
-
- def extract_hand_poses(hand_data, arm_pose_shared, hand_position_shared, hand_orientation_shared):
- with arm_pose_shared.get_lock():
- arm_pose_shared[:] = hand_data[0:16]
-
- with hand_position_shared.get_lock():
- for i in range(25):
- base = i * 16
- hand_position_shared[i * 3: i * 3 + 3] = [hand_data[base + 12], hand_data[base + 13], hand_data[base + 14]]
-
- with hand_orientation_shared.get_lock():
- for i in range(25):
- base = i * 16
- hand_orientation_shared[i * 9: i * 9 + 9] = [
- hand_data[base + 0], hand_data[base + 1], hand_data[base + 2],
- hand_data[base + 4], hand_data[base + 5], hand_data[base + 6],
- hand_data[base + 8], hand_data[base + 9], hand_data[base + 10],
- ]
-
- def extract_hand_states(state_dict, prefix):
- # pinch
- with getattr(self, f"{prefix}_pinch_state_shared").get_lock():
- getattr(self, f"{prefix}_pinch_state_shared").value = bool(state_dict.get("pinch", False))
- with getattr(self, f"{prefix}_pinch_value_shared").get_lock():
- getattr(self, f"{prefix}_pinch_value_shared").value = float(state_dict.get("pinchValue", 0.0))
- # squeeze
- with getattr(self, f"{prefix}_squeeze_state_shared").get_lock():
- getattr(self, f"{prefix}_squeeze_state_shared").value = bool(state_dict.get("squeeze", False))
- with getattr(self, f"{prefix}_squeeze_value_shared").get_lock():
- getattr(self, f"{prefix}_squeeze_value_shared").value = float(state_dict.get("squeezeValue", 0.0))
-
- extract_hand_poses(left_hand_data, self.left_arm_pose_shared, self.left_hand_position_shared, self.left_hand_orientation_shared)
- extract_hand_poses(right_hand_data, self.right_arm_pose_shared, self.right_hand_position_shared, self.right_hand_orientation_shared)
- extract_hand_states(left_hand_state, "left")
- extract_hand_states(right_hand_state, "right")
-
- except:
- pass
-
- async def main_image_binocular(self, session, fps=60):
- if self.use_hand_tracking:
- session.upsert(
- Hands(
- stream=True,
- key="hands",
- hideLeft=True,
- hideRight=True
- ),
- to="bgChildren",
- )
- else:
- session.upsert(
- MotionControllers(
- stream=True,
- key="motionControllers",
- left=True,
- right=True,
- ),
- to="bgChildren",
- )
-
- while True:
- display_image = cv2.cvtColor(self.img_array, cv2.COLOR_BGR2RGB)
- # aspect_ratio = self.img_width / self.img_height
- session.upsert(
- [
- ImageBackground(
- display_image[:, :self.img_width],
- aspect=1.778,
- height=1,
- distanceToCamera=1,
- # The underlying rendering engine supported a layer binary bitmask for both objects and the camera.
- # Below we set the two image planes, left and right, to layers=1 and layers=2.
- # Note that these two masks are associated with left eye’s camera and the right eye’s camera.
- layers=1,
- format="jpeg",
- quality=100,
- key="background-left",
- interpolate=True,
- ),
- ImageBackground(
- display_image[:, self.img_width:],
- aspect=1.778,
- height=1,
- distanceToCamera=1,
- layers=2,
- format="jpeg",
- quality=100,
- key="background-right",
- interpolate=True,
- ),
- ],
- to="bgChildren",
- )
- # 'jpeg' encoding should give you about 30fps with a 16ms wait in-between.
- await asyncio.sleep(0.016 * 2)
-
- async def main_image_monocular(self, session, fps=60):
- if self.use_hand_tracking:
- session.upsert(
- Hands(
- stream=True,
- key="hands",
- hideLeft=True,
- hideRight=True
- ),
- to="bgChildren",
- )
- else:
- session.upsert(
- MotionControllers(
- stream=True,
- key="motionControllers",
- left=True,
- right=True,
- ),
- to="bgChildren",
- )
-
- while True:
- display_image = cv2.cvtColor(self.img_array, cv2.COLOR_BGR2RGB)
- # aspect_ratio = self.img_width / self.img_height
- session.upsert(
- [
- ImageBackground(
- display_image,
- aspect=1.778,
- height=1,
- distanceToCamera=1,
- format="jpeg",
- quality=50,
- key="background-mono",
- interpolate=True,
- ),
- ],
- to="bgChildren",
- )
- await asyncio.sleep(0.016)
-
- async def main_image_webrtc(self, session, fps=60):
- if self.use_hand_tracking:
- session.upsert(
- Hands(
- stream=True,
- key="hands",
- showLeft=False,
- showRight=False
- ),
- to="bgChildren",
- )
- else:
- session.upsert(
- MotionControllers(
- stream=True,
- key="motionControllers",
- showLeft=False,
- showRight=False,
- )
- )
-
- session.upsert(
- WebRTCVideoPlane(
- # WebRTCStereoVideoPlane(
- src="https://10.0.7.49:8080/offer",
- iceServer={},
- key="webrtc",
- aspect=1.778,
- height = 7,
- ),
- to="bgChildren",
- )
- while True:
- await asyncio.sleep(1)
- # ==================== common data ====================
- @property
- def head_pose(self):
- """np.ndarray, shape (4, 4), head SE(3) pose matrix from Vuer (basis OpenXR Convention)."""
- with self.head_pose_shared.get_lock():
- return np.array(self.head_pose_shared[:]).reshape(4, 4, order="F")
-
- @property
- def left_arm_pose(self):
- """np.ndarray, shape (4, 4), left arm SE(3) pose matrix from Vuer (basis OpenXR Convention)."""
- with self.left_arm_pose_shared.get_lock():
- return np.array(self.left_arm_pose_shared[:]).reshape(4, 4, order="F")
-
- @property
- def right_arm_pose(self):
- """np.ndarray, shape (4, 4), right arm SE(3) pose matrix from Vuer (basis OpenXR Convention)."""
- with self.right_arm_pose_shared.get_lock():
- return np.array(self.right_arm_pose_shared[:]).reshape(4, 4, order="F")
-
- # ==================== Hand Tracking Data ====================
- @property
- def left_hand_positions(self):
- """np.ndarray, shape (25, 3), left hand 25 landmarks' 3D positions."""
- with self.left_hand_position_shared.get_lock():
- return np.array(self.left_hand_position_shared[:]).reshape(25, 3)
-
- @property
- def right_hand_positions(self):
- """np.ndarray, shape (25, 3), right hand 25 landmarks' 3D positions."""
- with self.right_hand_position_shared.get_lock():
- return np.array(self.right_hand_position_shared[:]).reshape(25, 3)
-
- @property
- def left_hand_orientations(self):
- """np.ndarray, shape (25, 3, 3), left hand 25 landmarks' orientations (flattened 3x3 matrices, column-major)."""
- with self.left_hand_orientation_shared.get_lock():
- return np.array(self.left_hand_orientation_shared[:]).reshape(25, 9).reshape(25, 3, 3, order="F")
-
- @property
- def right_hand_orientations(self):
- """np.ndarray, shape (25, 3, 3), right hand 25 landmarks' orientations (flattened 3x3 matrices, column-major)."""
- with self.right_hand_orientation_shared.get_lock():
- return np.array(self.right_hand_orientation_shared[:]).reshape(25, 9).reshape(25, 3, 3, order="F")
-
- @property
- def left_hand_pinch_state(self):
- """bool, whether left hand is pinching."""
- with self.left_pinch_state_shared.get_lock():
- return self.left_pinch_state_shared.value
-
- @property
- def left_hand_pinch_value(self):
- """float, pinch strength of left hand."""
- with self.left_pinch_value_shared.get_lock():
- return self.left_pinch_value_shared.value
-
- @property
- def left_hand_squeeze_state(self):
- """bool, whether left hand is squeezing."""
- with self.left_squeeze_state_shared.get_lock():
- return self.left_squeeze_state_shared.value
-
- @property
- def left_hand_squeeze_value(self):
- """float, squeeze strength of left hand."""
- with self.left_squeeze_value_shared.get_lock():
- return self.left_squeeze_value_shared.value
-
- @property
- def right_hand_pinch_state(self):
- """bool, whether right hand is pinching."""
- with self.right_pinch_state_shared.get_lock():
- return self.right_pinch_state_shared.value
-
- @property
- def right_hand_pinch_value(self):
- """float, pinch strength of right hand."""
- with self.right_pinch_value_shared.get_lock():
- return self.right_pinch_value_shared.value
-
- @property
- def right_hand_squeeze_state(self):
- """bool, whether right hand is squeezing."""
- with self.right_squeeze_state_shared.get_lock():
- return self.right_squeeze_state_shared.value
-
- @property
- def right_hand_squeeze_value(self):
- """float, squeeze strength of right hand."""
- with self.right_squeeze_value_shared.get_lock():
- return self.right_squeeze_value_shared.value
-
- # ==================== Controller Data ====================
- @property
- def left_controller_trigger_state(self):
- """bool, left controller trigger pressed or not."""
- with self.left_trigger_state_shared.get_lock():
- return self.left_trigger_state_shared.value
-
- @property
- def left_controller_trigger_value(self):
- """float, left controller trigger analog value (0.0 ~ 1.0)."""
- with self.left_trigger_value_shared.get_lock():
- return self.left_trigger_value_shared.value
-
- @property
- def left_controller_squeeze_state(self):
- """bool, left controller squeeze pressed or not."""
- with self.left_squeeze_state_shared.get_lock():
- return self.left_squeeze_state_shared.value
-
- @property
- def left_controller_squeeze_value(self):
- """float, left controller squeeze analog value (0.0 ~ 1.0)."""
- with self.left_squeeze_value_shared.get_lock():
- return self.left_squeeze_value_shared.value
-
- @property
- def left_controller_thumbstick_state(self):
- """bool, whether left thumbstick is touched or clicked."""
- with self.left_thumbstick_state_shared.get_lock():
- return self.left_thumbstick_state_shared.value
-
- @property
- def left_controller_thumbstick_value(self):
- """np.ndarray, shape (2,), left thumbstick 2D axis values (x, y)."""
- with self.left_thumbstick_value_shared.get_lock():
- return np.array(self.left_thumbstick_value_shared[:])
-
- @property
- def left_controller_aButton(self):
- """bool, left controller 'A' button pressed."""
- with self.left_aButton_shared.get_lock():
- return self.left_aButton_shared.value
-
- @property
- def left_controller_bButton(self):
- """bool, left controller 'B' button pressed."""
- with self.left_bButton_shared.get_lock():
- return self.left_bButton_shared.value
-
- @property
- def right_controller_trigger_state(self):
- """bool, right controller trigger pressed or not."""
- with self.right_trigger_state_shared.get_lock():
- return self.right_trigger_state_shared.value
-
- @property
- def right_controller_trigger_value(self):
- """float, right controller trigger analog value (0.0 ~ 1.0)."""
- with self.right_trigger_value_shared.get_lock():
- return self.right_trigger_value_shared.value
-
- @property
- def right_controller_squeeze_state(self):
- """bool, right controller squeeze pressed or not."""
- with self.right_squeeze_state_shared.get_lock():
- return self.right_squeeze_state_shared.value
-
- @property
- def right_controller_squeeze_value(self):
- """float, right controller squeeze analog value (0.0 ~ 1.0)."""
- with self.right_squeeze_value_shared.get_lock():
- return self.right_squeeze_value_shared.value
-
- @property
- def right_controller_thumbstick_state(self):
- """bool, whether right thumbstick is touched or clicked."""
- with self.right_thumbstick_state_shared.get_lock():
- return self.right_thumbstick_state_shared.value
-
- @property
- def right_controller_thumbstick_value(self):
- """np.ndarray, shape (2,), right thumbstick 2D axis values (x, y)."""
- with self.right_thumbstick_value_shared.get_lock():
- return np.array(self.right_thumbstick_value_shared[:])
-
- @property
- def right_controller_aButton(self):
- """bool, right controller 'A' button pressed."""
- with self.right_aButton_shared.get_lock():
- return self.right_aButton_shared.value
-
- @property
- def right_controller_bButton(self):
- """bool, right controller 'B' button pressed."""
- with self.right_bButton_shared.get_lock():
- return self.right_bButton_shared.value
\ No newline at end of file
diff --git a/teleop/open_television/tv_wrapper.py b/teleop/open_television/tv_wrapper.py
deleted file mode 100644
index 92ca634..0000000
--- a/teleop/open_television/tv_wrapper.py
+++ /dev/null
@@ -1,410 +0,0 @@
-import numpy as np
-from television import TeleVision
-from dataclasses import dataclass, field
-
-"""
-(basis) OpenXR Convention : y up, z back, x right.
-(basis) Robot Convention : z up, y left, x front.
-
-under (basis) Robot Convention, humanoid arm's initial pose convention:
-
- # (initial pose) OpenXR Left Arm Pose Convention (hand tracking):
- - the x-axis pointing from wrist toward middle.
- - the y-axis pointing from index toward pinky.
- - the z-axis pointing from palm toward back of the hand.
-
- # (initial pose) OpenXR Right Arm Pose Convention (hand tracking):
- - the x-axis pointing from wrist toward middle.
- - the y-axis pointing from pinky toward index.
- - the z-axis pointing from palm toward back of the hand.
-
- # (initial pose) Unitree Humanoid Left Arm URDF Convention:
- - the x-axis pointing from wrist toward middle.
- - the y-axis pointing from palm toward back of the hand.
- - the z-axis pointing from pinky toward index.
-
- # (initial pose) Unitree Humanoid Right Arm URDF Convention:
- - the x-axis pointing from wrist toward middle.
- - the y-axis pointing from back of the hand toward palm.
- - the z-axis pointing from pinky toward index.
-
-under (basis) Robot Convention, humanoid hand's initial pose convention:
-
- # (initial pose) OpenXR Left Hand Pose Convention (hand tracking):
- - the x-axis pointing from wrist toward middle.
- - the y-axis pointing from index toward pinky.
- - the z-axis pointing from palm toward back of the hand.
-
- # (initial pose) OpenXR Right Hand Pose Convention (hand tracking):
- - the x-axis pointing from wrist toward middle.
- - the y-axis pointing from pinky toward index.
- - the z-axis pointing from palm toward back of the hand.
-
- # (initial pose) Unitree Humanoid Left Hand URDF Convention:
- - The x-axis pointing from palm toward back of the hand.
- - The y-axis pointing from middle toward wrist.
- - The z-axis pointing from pinky toward index.
-
- # (initial pose) Unitree Humanoid Right Hand URDF Convention:
- - The x-axis pointing from palm toward back of the hand.
- - The y-axis pointing from middle toward wrist.
- - The z-axis pointing from index toward pinky.
-
-p.s. TeleVision (Vuer) obtains all raw data under the (basis) OpenXR Convention.
- In addition, arm pose data (hand tracking) follows the (initial pose) OpenXR Arm Pose Convention,
- while arm pose data (controller tracking) directly follows the (initial pose) Unitree Humanoid Arm URDF Convention (thus no transform is needed).
- Meanwhile, all raw data is in the WORLD frame defined by XR device odometry.
-
-p.s. From website: https://registry.khronos.org/OpenXR/specs/1.1/man/html/openxr.html.
- You can find **(initial pose) OpenXR Left/Right Arm Pose Convention** related information like this below:
- "The wrist joint is located at the pivot point of the wrist, which is location invariant when twisting the hand without moving the forearm.
- The backward (+Z) direction is parallel to the line from wrist joint to middle finger metacarpal joint, and points away from the finger tips.
- The up (+Y) direction points out towards back of the hand and perpendicular to the skin at wrist.
- The X direction is perpendicular to the Y and Z directions and follows the right hand rule."
- Note: The above context is of course under **(basis) OpenXR Convention**.
-
-p.s. **Unitree Arm/Hand URDF initial pose Convention** information come from URDF files.
-"""
-
-
-def safe_mat_update(prev_mat, mat):
- # Return previous matrix and False flag if the new matrix is non-singular (determinant ≠ 0).
- det = np.linalg.det(mat)
- if not np.isfinite(det) or np.isclose(det, 0.0, atol=1e-6):
- return prev_mat, False
- return mat, True
-
-def fast_mat_inv(mat):
- ret = np.eye(4)
- ret[:3, :3] = mat[:3, :3].T
- ret[:3, 3] = -mat[:3, :3].T @ mat[:3, 3]
- return ret
-
-def safe_rot_update(prev_rot_array, rot_array):
- dets = np.linalg.det(rot_array)
- if not np.all(np.isfinite(dets)) or np.any(np.isclose(dets, 0.0, atol=1e-6)):
- return prev_rot_array, False
- return rot_array, True
-
-# constants variable
-T_TO_UNITREE_HUMANOID_LEFT_ARM = np.array([[1, 0, 0, 0],
- [0, 0,-1, 0],
- [0, 1, 0, 0],
- [0, 0, 0, 1]])
-
-T_TO_UNITREE_HUMANOID_RIGHT_ARM = np.array([[1, 0, 0, 0],
- [0, 0, 1, 0],
- [0,-1, 0, 0],
- [0, 0, 0, 1]])
-
-T_TO_UNITREE_HAND = np.array([[0, 0, 1, 0],
- [-1, 0, 0, 0],
- [0, -1, 0, 0],
- [0, 0, 0, 1]])
-
-T_ROBOT_OPENXR = np.array([[ 0, 0,-1, 0],
- [-1, 0, 0, 0],
- [ 0, 1, 0, 0],
- [ 0, 0, 0, 1]])
-
-T_OPENXR_ROBOT = np.array([[ 0,-1, 0, 0],
- [ 0, 0, 1, 0],
- [-1, 0, 0, 0],
- [ 0, 0, 0, 1]])
-
-R_ROBOT_OPENXR = np.array([[ 0, 0,-1],
- [-1, 0, 0],
- [ 0, 1, 0]])
-
-R_OPENXR_ROBOT = np.array([[ 0,-1, 0],
- [ 0, 0, 1],
- [-1, 0, 0]])
-
-CONST_HEAD_POSE = np.array([[1, 0, 0, 0],
- [0, 1, 0, 1.5],
- [0, 0, 1, -0.2],
- [0, 0, 0, 1]])
-
-# For Robot initial position
-CONST_RIGHT_ARM_POSE = np.array([[1, 0, 0, 0.15],
- [0, 1, 0, 1.13],
- [0, 0, 1, -0.3],
- [0, 0, 0, 1]])
-
-CONST_LEFT_ARM_POSE = np.array([[1, 0, 0, -0.15],
- [0, 1, 0, 1.13],
- [0, 0, 1, -0.3],
- [0, 0, 0, 1]])
-
-CONST_HAND_ROT = np.tile(np.eye(3)[None, :, :], (25, 1, 1))
-
-@dataclass
-class TeleStateData:
- # hand tracking
- left_pinch_state: bool = False # True if index and thumb are pinching
- left_squeeze_state: bool = False # True if hand is making a fist
- left_squeeze_value: float = 0.0 # (0.0 ~ 1.0) degree of hand squeeze
- right_pinch_state: bool = False # True if index and thumb are pinching
- right_squeeze_state: bool = False # True if hand is making a fist
- right_squeeze_value: float = 0.0 # (0.0 ~ 1.0) degree of hand squeeze
-
- # controller tracking
- left_trigger_state: bool = False # True if trigger is actively pressed
- left_squeeze_ctrl_state: bool = False # True if grip button is pressed
- left_squeeze_ctrl_value: float = 0.0 # (0.0 ~ 1.0) grip pull depth
- left_thumbstick_state: bool = False # True if thumbstick button is pressed
- left_thumbstick_value: np.ndarray = field(default_factory=lambda: np.zeros(2)) # 2D vector (x, y), normalized
- left_aButton: bool = False # True if A button is pressed
- left_bButton: bool = False # True if B button is pressed
- right_trigger_state: bool = False # True if trigger is actively pressed
- right_squeeze_ctrl_state: bool = False # True if grip button is pressed
- right_squeeze_ctrl_value: float = 0.0 # (0.0 ~ 1.0) grip pull depth
- right_thumbstick_state: bool = False # True if thumbstick button is pressed
- right_thumbstick_value: np.ndarray = field(default_factory=lambda: np.zeros(2)) # 2D vector (x, y), normalized
- right_aButton: bool = False # True if A button is pressed
- right_bButton: bool = False # True if B button is pressed
-
-@dataclass
-class TeleData:
- head_pose: np.ndarray # (4,4) SE(3) pose of head matrix
- left_arm_pose: np.ndarray # (4,4) SE(3) pose of left arm
- right_arm_pose: np.ndarray # (4,4) SE(3) pose of right arm
- left_hand_pos: np.ndarray = None # (25,3) 3D positions of left hand joints
- right_hand_pos: np.ndarray = None # (25,3) 3D positions of right hand joints
- left_hand_rot: np.ndarray = None # (25,3,3) rotation matrices of left hand joints
- right_hand_rot: np.ndarray = None # (25,3,3) rotation matrices of right hand joints
- left_pinch_value: float = None # float (1x.0 ~ 0.0) pinch distance between index and thumb
- right_pinch_value: float = None # float (1x.0 ~ 0.0) pinch distance between index and thumb
- left_trigger_value: float = None # float (10.0 ~ 0.0) trigger pull depth
- right_trigger_value: float = None # float (10.0 ~ 0.0) trigger pull depth
- tele_state: TeleStateData = field(default_factory=TeleStateData)
-
-
-class TeleVisionWrapper:
- def __init__(self, binocular: bool, use_hand_tracking: bool, img_shape, img_shm_name, return_state_data: bool = True, return_hand_rot_data: bool = False,
- cert_file = None, key_file = None, ngrok = False, webrtc = False):
- """
- TeleVisionWrapper is a wrapper for the TeleVision class, which handles XR device's data suit for robot control.
- It initializes the TeleVision instance with the specified parameters and provides a method to get motion state data.
-
- :param binocular: A boolean indicating whether the head camera device is binocular or not.
- :param use_hand_tracking: A boolean indicating whether to use hand tracking or use controller tracking.
- :param img_shape: The shape of the image to be processed.
- :param img_shm_name: The name of the shared memory for the image.
- :param return_state: A boolean indicating whether to return the state of the hand or controller.
- :param return_hand_rot: A boolean indicating whether to return the hand rotation data.
- :param cert_file: The path to the certificate file for secure connection.
- :param key_file: The path to the key file for secure connection.
- """
- self.use_hand_tracking = use_hand_tracking
- self.return_state_data = return_state_data
- self.return_hand_rot_data = return_hand_rot_data
- self.tvuer = TeleVision(binocular, use_hand_tracking, img_shape, img_shm_name, cert_file=cert_file, key_file=key_file,
- ngrok=ngrok, webrtc=webrtc)
-
- def get_motion_state_data(self):
- """
- Get processed motion state data from the TeleVision instance.
-
- All returned data are transformed from the OpenXR Convention to the (Robot & Unitree) Convention.
- """
- # Variable Naming Convention below,
- # ┌────────────┬───────────────────────────┬──────────────────────────────────┬────────────────────────────────────┬────────────────────────────────────┐
- # │left / right│ Bxr │ Brobot │ IPxr │ IPunitree │
- # │────────────│───────────────────────────│──────────────────────────────────│────────────────────────────────────│────────────────────────────────────│
- # │ side │ (basis) OpenXR Convention │ (basis) Robot Convention │ (initial pose) OpenXR Convention │ (initial pose) Unitree Convention │
- # └────────────┴───────────────────────────┴──────────────────────────────────┴────────────────────────────────────┴────────────────────────────────────┘
- # ┌───────────────────────────────────┬─────────────────────┐
- # │ world / arm / head / waist │ arm / head / hand │
- # │───────────────────────────────────│─────────────────────│
- # │ source frame │ target frame │
- # └───────────────────────────────────┴─────────────────────┘
-
- # TeleVision (Vuer) obtains all raw data under the (basis) OpenXR Convention.
- Bxr_world_head, head_pose_is_valid = safe_mat_update(CONST_HEAD_POSE, self.tvuer.head_pose)
-
- if self.use_hand_tracking:
- # 'Arm' pose data follows (basis) OpenXR Convention and (initial pose) OpenXR Arm Convention.
- left_IPxr_Bxr_world_arm, left_arm_is_valid = safe_mat_update(CONST_LEFT_ARM_POSE, self.tvuer.left_arm_pose)
- right_IPxr_Bxr_world_arm, right_arm_is_valid = safe_mat_update(CONST_RIGHT_ARM_POSE, self.tvuer.right_arm_pose)
-
- # Change basis convention
- # From (basis) OpenXR Convention to (basis) Robot Convention:
- # Brobot_Pose = T_{robot}_{openxr} * Bxr_Pose * T_{robot}_{openxr}^T ==>
- # Brobot_Pose = T_{robot}_{openxr} * Bxr_Pose * T_{openxr}_{robot}
- # Reason for right multiply T_OPENXR_ROBOT = fast_mat_inv(T_ROBOT_OPENXR):
- # This is similarity transformation: B = PAP^{-1}, that is B ~ A
- # For example:
- # - For a pose data T_r under the (basis) Robot Convention, left-multiplying Brobot_Pose means:
- # Brobot_Pose * T_r ==> T_{robot}_{openxr} * PoseMatrix_openxr * T_{openxr}_{robot} * T_r
- # - First, transform T_r to the (basis) OpenXR Convention (The function of T_{openxr}_{robot})
- # - Then, apply the rotation PoseMatrix_openxr in the OpenXR Convention (The function of PoseMatrix_openxr)
- # - Finally, transform back to the Robot Convention (The function of T_{robot}_{openxr})
- # - This results in the same rotation effect under the Robot Convention as in the OpenXR Convention.
- Brobot_world_head = T_ROBOT_OPENXR @ Bxr_world_head @ T_OPENXR_ROBOT
- left_IPxr_Brobot_world_arm = T_ROBOT_OPENXR @ left_IPxr_Bxr_world_arm @ T_OPENXR_ROBOT
- right_IPxr_Brobot_world_arm = T_ROBOT_OPENXR @ right_IPxr_Bxr_world_arm @ T_OPENXR_ROBOT
-
- # Change initial pose convention
- # From (initial pose) OpenXR Arm Convention to (initial pose) Unitree Humanoid Arm URDF Convention
- # Reason for right multiply (T_TO_UNITREE_HUMANOID_LEFT_ARM) : Rotate 90 degrees counterclockwise about its own x-axis.
- # Reason for right multiply (T_TO_UNITREE_HUMANOID_RIGHT_ARM): Rotate 90 degrees clockwise about its own x-axis.
- left_IPunitree_Brobot_world_arm = left_IPxr_Brobot_world_arm @ (T_TO_UNITREE_HUMANOID_LEFT_ARM if left_arm_is_valid else np.eye(4))
- right_IPunitree_Brobot_world_arm = right_IPxr_Brobot_world_arm @ (T_TO_UNITREE_HUMANOID_RIGHT_ARM if right_arm_is_valid else np.eye(4))
-
- # Transfer from WORLD to HEAD coordinate (translation adjustment only)
- left_IPunitree_Brobot_head_arm = left_IPunitree_Brobot_world_arm.copy()
- right_IPunitree_Brobot_head_arm = right_IPunitree_Brobot_world_arm.copy()
- left_IPunitree_Brobot_head_arm[0:3, 3] = left_IPunitree_Brobot_head_arm[0:3, 3] - Brobot_world_head[0:3, 3]
- right_IPunitree_Brobot_head_arm[0:3, 3] = right_IPunitree_Brobot_world_arm[0:3, 3] - Brobot_world_head[0:3, 3]
-
- # =====coordinate origin offset=====
- # The origin of the coordinate for IK Solve is near the WAIST joint motor. You can use teleop/robot_control/robot_arm_ik.py Unit_Test to visualize it.
- # The origin of the coordinate of IPunitree_Brobot_head_arm is HEAD.
- # So it is necessary to translate the origin of IPunitree_Brobot_head_arm from HEAD to WAIST.
- left_IPunitree_Brobot_waist_arm = left_IPunitree_Brobot_head_arm.copy()
- right_IPunitree_Brobot_waist_arm = right_IPunitree_Brobot_head_arm.copy()
- left_IPunitree_Brobot_waist_arm[0, 3] +=0.15 # x
- right_IPunitree_Brobot_waist_arm[0,3] +=0.15
- left_IPunitree_Brobot_waist_arm[2, 3] +=0.45 # z
- right_IPunitree_Brobot_waist_arm[2,3] +=0.45
-
- # -----------------------------------hand position----------------------------------------
- if left_arm_is_valid and right_arm_is_valid:
- # Homogeneous, [xyz] to [xyz1]
- # np.concatenate([25,3]^T,(1,25)) ==> Bxr_world_hand_pos.shape is (4,25)
- # Now under (basis) OpenXR Convention, Bxr_world_hand_pos data like this:
- # [x0 x1 x2 ··· x23 x24]
- # [y0 y1 y1 ··· y23 y24]
- # [z0 z1 z2 ··· z23 z24]
- # [ 1 1 1 ··· 1 1]
- left_IPxr_Bxr_world_hand_pos = np.concatenate([self.tvuer.left_hand_positions.T, np.ones((1, self.tvuer.left_hand_positions.shape[0]))])
- right_IPxr_Bxr_world_hand_pos = np.concatenate([self.tvuer.right_hand_positions.T, np.ones((1, self.tvuer.right_hand_positions.shape[0]))])
-
- # Change basis convention
- # From (basis) OpenXR Convention to (basis) Robot Convention
- # Just a change of basis for 3D points. No rotation, only translation. So, no need to right-multiply fast_mat_inv(T_ROBOT_OPENXR).
- left_IPxr_Brobot_world_hand_pos = T_ROBOT_OPENXR @ left_IPxr_Bxr_world_hand_pos
- right_IPxr_Brobot_world_hand_pos = T_ROBOT_OPENXR @ right_IPxr_Bxr_world_hand_pos
-
- # Transfer from WORLD to ARM frame under (basis) Robot Convention:
- # Brobot_{world}_{arm}^T * Brobot_{world}_pos ==> Brobot_{arm}_{world} * Brobot_{world}_pos ==> Brobot_arm_hand_pos, Now it's based on the arm frame.
- left_IPxr_Brobot_arm_hand_pos = fast_mat_inv(left_IPxr_Brobot_world_arm) @ left_IPxr_Brobot_world_hand_pos
- right_IPxr_Brobot_arm_hand_pos = fast_mat_inv(right_IPxr_Brobot_world_arm) @ right_IPxr_Brobot_world_hand_pos
-
- # Change initial pose convention
- # From (initial pose) XR Hand Convention to (initial pose) Unitree Humanoid Hand URDF Convention:
- # T_TO_UNITREE_HAND @ IPxr_Brobot_arm_hand_pos ==> IPunitree_Brobot_arm_hand_pos
- # ((4,4) @ (4,25))[0:3, :].T ==> (4,25)[0:3, :].T ==> (3,25).T ==> (25,3)
- # Now under (initial pose) Unitree Humanoid Hand URDF Convention, matrix shape like this:
- # [x0, y0, z0]
- # [x1, y1, z1]
- # ···
- # [x23,y23,z23]
- # [x24,y24,z24]
- left_IPunitree_Brobot_arm_hand_pos = (T_TO_UNITREE_HAND @ left_IPxr_Brobot_arm_hand_pos)[0:3, :].T
- right_IPunitree_Brobot_arm_hand_pos = (T_TO_UNITREE_HAND @ right_IPxr_Brobot_arm_hand_pos)[0:3, :].T
- else:
- left_IPunitree_Brobot_arm_hand_pos = np.zeros((25, 3))
- right_IPunitree_Brobot_arm_hand_pos = np.zeros((25, 3))
-
- # -----------------------------------hand rotation----------------------------------------
- if self.return_hand_rot_data:
- left_Bxr_world_hand_rot, left_hand_rot_is_valid = safe_rot_update(CONST_HAND_ROT, self.tvuer.left_hand_orientations) # [25, 3, 3]
- right_Bxr_world_hand_rot, right_hand_rot_is_valid = safe_rot_update(CONST_HAND_ROT, self.tvuer.right_hand_orientations)
-
- if left_hand_rot_is_valid and right_hand_rot_is_valid:
- left_Bxr_arm_hand_rot = np.einsum('ij,njk->nik', left_IPxr_Bxr_world_arm[:3, :3].T, left_Bxr_world_hand_rot)
- right_Bxr_arm_hand_rot = np.einsum('ij,njk->nik', right_IPxr_Bxr_world_arm[:3, :3].T, right_Bxr_world_hand_rot)
- # Change basis convention
- left_Brobot_arm_hand_rot = np.einsum('ij,njk,kl->nil', R_ROBOT_OPENXR, left_Bxr_arm_hand_rot, R_OPENXR_ROBOT)
- right_Brobot_arm_hand_rot = np.einsum('ij,njk,kl->nil', R_ROBOT_OPENXR, right_Bxr_arm_hand_rot, R_OPENXR_ROBOT)
- else:
- left_Brobot_arm_hand_rot = left_Bxr_world_hand_rot
- right_Brobot_arm_hand_rot = right_Bxr_world_hand_rot
- else:
- left_Brobot_arm_hand_rot = None
- right_Brobot_arm_hand_rot = None
-
- if self.return_state_data:
- hand_state = TeleStateData(
- left_pinch_state=self.tvuer.left_hand_pinch_state,
- left_squeeze_state=self.tvuer.left_hand_squeeze_state,
- left_squeeze_value=self.tvuer.left_hand_squeeze_value,
- right_pinch_state=self.tvuer.right_hand_pinch_state,
- right_squeeze_state=self.tvuer.right_hand_squeeze_state,
- right_squeeze_value=self.tvuer.right_hand_squeeze_value,
- )
- else:
- hand_state = None
-
- return TeleData(
- head_pose=Brobot_world_head,
- left_arm_pose=left_IPunitree_Brobot_waist_arm,
- right_arm_pose=right_IPunitree_Brobot_waist_arm,
- left_hand_pos=left_IPunitree_Brobot_arm_hand_pos,
- right_hand_pos=right_IPunitree_Brobot_arm_hand_pos,
- left_hand_rot=left_Brobot_arm_hand_rot,
- right_hand_rot=right_Brobot_arm_hand_rot,
- left_pinch_value=self.tvuer.left_hand_pinch_value * 100.0,
- right_pinch_value=self.tvuer.right_hand_pinch_value * 100.0,
- tele_state=hand_state
- )
- else:
- # Controller pose data directly follows the (initial pose) Unitree Humanoid Arm URDF Convention (thus no transform is needed).
- left_IPunitree_Bxr_world_arm, left_arm_is_valid = safe_mat_update(CONST_LEFT_ARM_POSE, self.tvuer.left_arm_pose)
- right_IPunitree_Bxr_world_arm, right_arm_is_valid = safe_mat_update(CONST_RIGHT_ARM_POSE, self.tvuer.right_arm_pose)
-
- # Change basis convention
- Brobot_world_head = T_ROBOT_OPENXR @ Bxr_world_head @ T_OPENXR_ROBOT
- left_IPunitree_Brobot_world_arm = T_ROBOT_OPENXR @ left_IPunitree_Bxr_world_arm @ T_OPENXR_ROBOT
- right_IPunitree_Brobot_world_arm = T_ROBOT_OPENXR @ right_IPunitree_Bxr_world_arm @ T_OPENXR_ROBOT
-
- # Transfer from WORLD to HEAD coordinate (translation adjustment only)
- left_IPunitree_Brobot_head_arm = left_IPunitree_Brobot_world_arm.copy()
- right_IPunitree_Brobot_head_arm = right_IPunitree_Brobot_world_arm.copy()
- left_IPunitree_Brobot_head_arm[0:3, 3] = left_IPunitree_Brobot_head_arm[0:3, 3] - Brobot_world_head[0:3, 3]
- right_IPunitree_Brobot_head_arm[0:3, 3] = right_IPunitree_Brobot_head_arm[0:3, 3] - Brobot_world_head[0:3, 3]
-
- # =====coordinate origin offset=====
- # The origin of the coordinate for IK Solve is near the WAIST joint motor. You can use teleop/robot_control/robot_arm_ik.py Unit_Test to check it.
- # The origin of the coordinate of IPunitree_Brobot_head_arm is HEAD.
- # So it is necessary to translate the origin of IPunitree_Brobot_head_arm from HEAD to WAIST.
- left_IPunitree_Brobot_waist_arm = left_IPunitree_Brobot_head_arm.copy()
- right_IPunitree_Brobot_waist_arm = right_IPunitree_Brobot_head_arm.copy()
- left_IPunitree_Brobot_waist_arm[0, 3] +=0.15 # x
- right_IPunitree_Brobot_waist_arm[0,3] +=0.15
- left_IPunitree_Brobot_waist_arm[2, 3] +=0.45 # z
- right_IPunitree_Brobot_waist_arm[2,3] +=0.45
- # left_IPunitree_Brobot_waist_arm[1, 3] +=0.02 # y
- # right_IPunitree_Brobot_waist_arm[1,3] +=0.02
-
- # return data
- if self.return_state_data:
- controller_state = TeleStateData(
- left_trigger_state=self.tvuer.left_controller_trigger_state,
- left_squeeze_ctrl_state=self.tvuer.left_controller_squeeze_state,
- left_squeeze_ctrl_value=self.tvuer.left_controller_squeeze_value,
- left_thumbstick_state=self.tvuer.left_controller_thumbstick_state,
- left_thumbstick_value=self.tvuer.left_controller_thumbstick_value,
- left_aButton=self.tvuer.left_controller_aButton,
- left_bButton=self.tvuer.left_controller_bButton,
- right_trigger_state=self.tvuer.right_controller_trigger_state,
- right_squeeze_ctrl_state=self.tvuer.right_controller_squeeze_state,
- right_squeeze_ctrl_value=self.tvuer.right_controller_squeeze_value,
- right_thumbstick_state=self.tvuer.right_controller_thumbstick_state,
- right_thumbstick_value=self.tvuer.right_controller_thumbstick_value,
- right_aButton=self.tvuer.right_controller_aButton,
- right_bButton=self.tvuer.right_controller_bButton,
- )
- else:
- controller_state = None
-
- return TeleData(
- head_pose=Brobot_world_head,
- left_arm_pose=left_IPunitree_Brobot_waist_arm,
- right_arm_pose=right_IPunitree_Brobot_waist_arm,
- left_trigger_value=10.0 - self.tvuer.left_controller_trigger_value * 10,
- right_trigger_value=10.0 - self.tvuer.right_controller_trigger_value * 10,
- tele_state=controller_state
- )
\ No newline at end of file
diff --git a/teleop/robot_control/dex-retargeting b/teleop/robot_control/dex-retargeting
new file mode 160000
index 0000000..d7753d3
--- /dev/null
+++ b/teleop/robot_control/dex-retargeting
@@ -0,0 +1 @@
+Subproject commit d7753d38c9ff11f80bafea6cd168351fd3db9b0e
diff --git a/teleop/robot_control/dex_retargeting/CITATION.cff b/teleop/robot_control/dex_retargeting/CITATION.cff
deleted file mode 100644
index 770c132..0000000
--- a/teleop/robot_control/dex_retargeting/CITATION.cff
+++ /dev/null
@@ -1,22 +0,0 @@
-cff-version: 1.2.0
-authors:
- - family-names: "Qin"
- given-names: "Yuzhe"
- - family-names: "Yang"
- given-names: "Wei"
- - family-names: "Huang"
- given-names: "Binghao"
- - family-names: "Van Wyk"
- given-names: "Karl"
- - family-names: "Su"
- given-names: "Hao"
- - family-names: "Wang"
- given-names: "Xiaolong"
- - family-names: "Chao"
- given-names: "Yu-Wei"
- - family-names: "Fox"
- given-names: "Dieter"
-date-released: 2023-10-25
-title: "AnyTeleop"
-message: "Thanks for using dex-retargeting. If you use this software, please cite it as below."
-url: "https://github.com/dexsuite/dex-retargeting"
diff --git a/teleop/robot_control/dex_retargeting/LICENSE b/teleop/robot_control/dex_retargeting/LICENSE
deleted file mode 100644
index 15904fb..0000000
--- a/teleop/robot_control/dex_retargeting/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2023 Yuzhe Qin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/teleop/robot_control/dex_retargeting/__init__.py b/teleop/robot_control/dex_retargeting/__init__.py
deleted file mode 100644
index 3dd3d2d..0000000
--- a/teleop/robot_control/dex_retargeting/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.4.6"
diff --git a/teleop/robot_control/dex_retargeting/constants.py b/teleop/robot_control/dex_retargeting/constants.py
deleted file mode 100644
index 4b7147a..0000000
--- a/teleop/robot_control/dex_retargeting/constants.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import enum
-from pathlib import Path
-from typing import Optional
-
-import numpy as np
-
-OPERATOR2MANO_RIGHT = np.array(
- [
- [0, 0, -1],
- [-1, 0, 0],
- [0, 1, 0],
- ]
-)
-
-OPERATOR2MANO_LEFT = np.array(
- [
- [0, 0, -1],
- [1, 0, 0],
- [0, -1, 0],
- ]
-)
-
-
-class RobotName(enum.Enum):
- allegro = enum.auto()
- shadow = enum.auto()
- svh = enum.auto()
- leap = enum.auto()
- ability = enum.auto()
- inspire = enum.auto()
- panda = enum.auto()
-
-
-class RetargetingType(enum.Enum):
- vector = enum.auto() # For teleoperation, no finger closing prior
- position = enum.auto() # For offline data processing, especially hand-object interaction data
- dexpilot = enum.auto() # For teleoperation, with finger closing prior
-
-
-class HandType(enum.Enum):
- right = enum.auto()
- left = enum.auto()
-
-
-ROBOT_NAME_MAP = {
- RobotName.allegro: "allegro_hand",
- RobotName.shadow: "shadow_hand",
- RobotName.svh: "schunk_svh_hand",
- RobotName.leap: "leap_hand",
- RobotName.ability: "ability_hand",
- RobotName.inspire: "inspire_hand",
- RobotName.panda: "panda_gripper",
-}
-
-ROBOT_NAMES = list(ROBOT_NAME_MAP.keys())
-
-
-def get_default_config_path(
- robot_name: RobotName, retargeting_type: RetargetingType, hand_type: HandType
-) -> Optional[Path]:
- config_path = Path(__file__).parent / "configs"
- if retargeting_type is RetargetingType.position:
- config_path = config_path / "offline"
- else:
- config_path = config_path / "teleop"
-
- robot_name_str = ROBOT_NAME_MAP[robot_name]
- hand_type_str = hand_type.name
- if "gripper" in robot_name_str: # For gripper robots, only use gripper config file.
- if retargeting_type == RetargetingType.dexpilot:
- config_name = f"{robot_name_str}_dexpilot.yml"
- else:
- config_name = f"{robot_name_str}.yml"
- else:
- if retargeting_type == RetargetingType.dexpilot:
- config_name = f"{robot_name_str}_{hand_type_str}_dexpilot.yml"
- else:
- config_name = f"{robot_name_str}_{hand_type_str}.yml"
- return config_path / config_name
-
-
-OPERATOR2MANO = {
- HandType.right: OPERATOR2MANO_RIGHT,
- HandType.left: OPERATOR2MANO_LEFT,
-}
diff --git a/teleop/robot_control/dex_retargeting/kinematics_adaptor.py b/teleop/robot_control/dex_retargeting/kinematics_adaptor.py
deleted file mode 100644
index 045a49f..0000000
--- a/teleop/robot_control/dex_retargeting/kinematics_adaptor.py
+++ /dev/null
@@ -1,102 +0,0 @@
-from abc import abstractmethod
-from typing import List
-
-import numpy as np
-
-from .robot_wrapper import RobotWrapper
-
-
-class KinematicAdaptor:
- def __init__(self, robot: RobotWrapper, target_joint_names: List[str]):
- self.robot = robot
- self.target_joint_names = target_joint_names
-
- # Index mapping
- self.idx_pin2target = np.array([robot.get_joint_index(n) for n in target_joint_names])
-
- @abstractmethod
- def forward_qpos(self, qpos: np.ndarray) -> np.ndarray:
- """
- Adapt the joint position for different kinematics constraints.
- Note that the joint order of this qpos is consistent with pinocchio
- Args:
- qpos: the pinocchio qpos
-
- Returns: the adapted qpos with the same shape as input
-
- """
- pass
-
- @abstractmethod
- def backward_jacobian(self, jacobian: np.ndarray) -> np.ndarray:
- """
- Adapt the jacobian for different kinematics applications.
- Note that the joint order of this Jacobian is consistent with pinocchio
- Args:
- jacobian: the original jacobian
-
- Returns: the adapted jacobian with the same shape as input
-
- """
- pass
-
-
-class MimicJointKinematicAdaptor(KinematicAdaptor):
- def __init__(
- self,
- robot: RobotWrapper,
- target_joint_names: List[str],
- source_joint_names: List[str],
- mimic_joint_names: List[str],
- multipliers: List[float],
- offsets: List[float],
- ):
- super().__init__(robot, target_joint_names)
-
- self.multipliers = np.array(multipliers)
- self.offsets = np.array(offsets)
-
- # Joint name check
- union_set = set(mimic_joint_names).intersection(set(target_joint_names))
- if len(union_set) > 0:
- raise ValueError(
- f"Mimic joint should not be one of the target joints.\n"
- f"Mimic joints: {mimic_joint_names}.\n"
- f"Target joints: {target_joint_names}\n"
- f"You need to specify the target joint names explicitly in your retargeting config"
- f" for robot with mimic joint constraints: {target_joint_names}"
- )
-
- # Indices in the pinocchio
- self.idx_pin2source = np.array([robot.get_joint_index(name) for name in source_joint_names])
- self.idx_pin2mimic = np.array([robot.get_joint_index(name) for name in mimic_joint_names])
-
- # Indices in the output results
- self.idx_target2source = np.array([self.target_joint_names.index(n) for n in source_joint_names])
-
- # Dimension check
- len_source, len_mimic = self.idx_target2source.shape[0], self.idx_pin2mimic.shape[0]
- len_mul, len_offset = self.multipliers.shape[0], self.offsets.shape[0]
- if not (len_mimic == len_source == len_mul == len_offset):
- raise ValueError(
- f"Mimic joints setting dimension mismatch.\n"
- f"Source joints: {len_source}, mimic joints: {len_mimic}, multiplier: {len_mul}, offset: {len_offset}"
- )
- self.num_active_joints = len(robot.dof_joint_names) - len_mimic
-
- # Uniqueness check
- if len(mimic_joint_names) != len(np.unique(mimic_joint_names)):
- raise ValueError(f"Redundant mimic joint names: {mimic_joint_names}")
-
- def forward_qpos(self, pin_qpos: np.ndarray) -> np.ndarray:
- mimic_qpos = pin_qpos[self.idx_pin2source] * self.multipliers + self.offsets
- pin_qpos[self.idx_pin2mimic] = mimic_qpos
- return pin_qpos
-
- def backward_jacobian(self, jacobian: np.ndarray) -> np.ndarray:
- target_jacobian = jacobian[..., self.idx_pin2target]
- mimic_joint_jacobian = jacobian[..., self.idx_pin2mimic] * self.multipliers
-
- for i, index in enumerate(self.idx_target2source):
- target_jacobian[..., index] += mimic_joint_jacobian[..., i]
- return target_jacobian
diff --git a/teleop/robot_control/dex_retargeting/optimizer.py b/teleop/robot_control/dex_retargeting/optimizer.py
deleted file mode 100644
index ed891fe..0000000
--- a/teleop/robot_control/dex_retargeting/optimizer.py
+++ /dev/null
@@ -1,516 +0,0 @@
-from abc import abstractmethod
-from typing import List, Optional
-
-import nlopt
-import numpy as np
-import torch
-
-from .kinematics_adaptor import KinematicAdaptor, MimicJointKinematicAdaptor
-from .robot_wrapper import RobotWrapper
-
-
-class Optimizer:
- retargeting_type = "BASE"
-
- def __init__(
- self,
- robot: RobotWrapper,
- target_joint_names: List[str],
- target_link_human_indices: np.ndarray,
- ):
- self.robot = robot
- self.num_joints = robot.dof
-
- joint_names = robot.dof_joint_names
- idx_pin2target = []
- for target_joint_name in target_joint_names:
- if target_joint_name not in joint_names:
- raise ValueError(f"Joint {target_joint_name} given does not appear to be in robot XML.")
- idx_pin2target.append(joint_names.index(target_joint_name))
- self.target_joint_names = target_joint_names
- self.idx_pin2target = np.array(idx_pin2target)
-
- self.idx_pin2fixed = np.array([i for i in range(robot.dof) if i not in idx_pin2target], dtype=int)
- self.opt = nlopt.opt(nlopt.LD_SLSQP, len(idx_pin2target))
- self.opt_dof = len(idx_pin2target) # This dof includes the mimic joints
-
- # Target
- self.target_link_human_indices = target_link_human_indices
-
- # Free joint
- link_names = robot.link_names
- self.has_free_joint = len([name for name in link_names if "dummy" in name]) >= 6
-
- # Kinematics adaptor
- self.adaptor: Optional[KinematicAdaptor] = None
-
- def set_joint_limit(self, joint_limits: np.ndarray, epsilon=1e-3):
- if joint_limits.shape != (self.opt_dof, 2):
- raise ValueError(f"Expect joint limits have shape: {(self.opt_dof, 2)}, but get {joint_limits.shape}")
- self.opt.set_lower_bounds((joint_limits[:, 0] - epsilon).tolist())
- self.opt.set_upper_bounds((joint_limits[:, 1] + epsilon).tolist())
-
- def get_link_indices(self, target_link_names):
- return [self.robot.get_link_index(link_name) for link_name in target_link_names]
-
- def set_kinematic_adaptor(self, adaptor: KinematicAdaptor):
- self.adaptor = adaptor
-
- # Remove mimic joints from fixed joint list
- if isinstance(adaptor, MimicJointKinematicAdaptor):
- fixed_idx = self.idx_pin2fixed
- mimic_idx = adaptor.idx_pin2mimic
- new_fixed_id = np.array([x for x in fixed_idx if x not in mimic_idx], dtype=int)
- self.idx_pin2fixed = new_fixed_id
-
- def retarget(self, ref_value, fixed_qpos, last_qpos):
- """
- Compute the retargeting results using non-linear optimization
- Args:
- ref_value: the reference value in cartesian space as input, different optimizer has different reference
- fixed_qpos: the fixed value (not optimized) in retargeting, consistent with self.fixed_joint_names
- last_qpos: the last retargeting results or initial value, consistent with function return
-
- Returns: joint position of robot, the joint order and dim is consistent with self.target_joint_names
-
- """
- if len(fixed_qpos) != len(self.idx_pin2fixed):
- raise ValueError(
- f"Optimizer has {len(self.idx_pin2fixed)} joints but non_target_qpos {fixed_qpos} is given"
- )
- objective_fn = self.get_objective_function(ref_value, fixed_qpos, np.array(last_qpos).astype(np.float32))
-
- self.opt.set_min_objective(objective_fn)
- try:
- qpos = self.opt.optimize(last_qpos)
- return np.array(qpos, dtype=np.float32)
- except RuntimeError as e:
- print(e)
- return np.array(last_qpos, dtype=np.float32)
-
- @abstractmethod
- def get_objective_function(self, ref_value: np.ndarray, fixed_qpos: np.ndarray, last_qpos: np.ndarray):
- pass
-
- @property
- def fixed_joint_names(self):
- joint_names = self.robot.dof_joint_names
- return [joint_names[i] for i in self.idx_pin2fixed]
-
-
-class PositionOptimizer(Optimizer):
- retargeting_type = "POSITION"
-
- def __init__(
- self,
- robot: RobotWrapper,
- target_joint_names: List[str],
- target_link_names: List[str],
- target_link_human_indices: np.ndarray,
- huber_delta=0.02,
- norm_delta=4e-3,
- ):
- super().__init__(robot, target_joint_names, target_link_human_indices)
- self.body_names = target_link_names
- self.huber_loss = torch.nn.SmoothL1Loss(beta=huber_delta)
- self.norm_delta = norm_delta
-
- # Sanity check and cache link indices
- self.target_link_indices = self.get_link_indices(target_link_names)
-
- self.opt.set_ftol_abs(1e-5)
-
- def get_objective_function(self, target_pos: np.ndarray, fixed_qpos: np.ndarray, last_qpos: np.ndarray):
- qpos = np.zeros(self.num_joints)
- qpos[self.idx_pin2fixed] = fixed_qpos
- torch_target_pos = torch.as_tensor(target_pos)
- torch_target_pos.requires_grad_(False)
-
- def objective(x: np.ndarray, grad: np.ndarray) -> float:
- qpos[self.idx_pin2target] = x
-
- # Kinematics forwarding for qpos
- if self.adaptor is not None:
- qpos[:] = self.adaptor.forward_qpos(qpos)[:]
-
- self.robot.compute_forward_kinematics(qpos)
- target_link_poses = [self.robot.get_link_pose(index) for index in self.target_link_indices]
- body_pos = np.stack([pose[:3, 3] for pose in target_link_poses], axis=0) # (n ,3)
-
- # Torch computation for accurate loss and grad
- torch_body_pos = torch.as_tensor(body_pos)
- torch_body_pos.requires_grad_()
-
- # Loss term for kinematics retargeting based on 3D position error
- huber_distance = self.huber_loss(torch_body_pos, torch_target_pos)
- result = huber_distance.cpu().detach().item()
-
- if grad.size > 0:
- jacobians = []
- for i, index in enumerate(self.target_link_indices):
- link_body_jacobian = self.robot.compute_single_link_local_jacobian(qpos, index)[:3, ...]
- link_pose = target_link_poses[i]
- link_rot = link_pose[:3, :3]
- link_kinematics_jacobian = link_rot @ link_body_jacobian
- jacobians.append(link_kinematics_jacobian)
-
- # Note: the joint order in this jacobian is consistent pinocchio
- jacobians = np.stack(jacobians, axis=0)
- huber_distance.backward()
- grad_pos = torch_body_pos.grad.cpu().numpy()[:, None, :]
-
- # Convert the jacobian from pinocchio order to target order
- if self.adaptor is not None:
- jacobians = self.adaptor.backward_jacobian(jacobians)
- else:
- jacobians = jacobians[..., self.idx_pin2target]
-
- # Compute the gradient to the qpos
- grad_qpos = np.matmul(grad_pos, jacobians)
- grad_qpos = grad_qpos.mean(1).sum(0)
- grad_qpos += 2 * self.norm_delta * (x - last_qpos)
-
- grad[:] = grad_qpos[:]
-
- return result
-
- return objective
-
-
-class VectorOptimizer(Optimizer):
- retargeting_type = "VECTOR"
-
- def __init__(
- self,
- robot: RobotWrapper,
- target_joint_names: List[str],
- target_origin_link_names: List[str],
- target_task_link_names: List[str],
- target_link_human_indices: np.ndarray,
- huber_delta=0.02,
- norm_delta=4e-3,
- scaling=1.0,
- ):
- super().__init__(robot, target_joint_names, target_link_human_indices)
- self.origin_link_names = target_origin_link_names
- self.task_link_names = target_task_link_names
- self.huber_loss = torch.nn.SmoothL1Loss(beta=huber_delta, reduction="mean")
- self.norm_delta = norm_delta
- self.scaling = scaling
-
- # Computation cache for better performance
- # For one link used in multiple vectors, e.g. hand palm, we do not want to compute it multiple times
- self.computed_link_names = list(set(target_origin_link_names).union(set(target_task_link_names)))
- self.origin_link_indices = torch.tensor(
- [self.computed_link_names.index(name) for name in target_origin_link_names]
- )
- self.task_link_indices = torch.tensor([self.computed_link_names.index(name) for name in target_task_link_names])
-
- # Cache link indices that will involve in kinematics computation
- self.computed_link_indices = self.get_link_indices(self.computed_link_names)
-
- self.opt.set_ftol_abs(1e-6)
-
- def get_objective_function(self, target_vector: np.ndarray, fixed_qpos: np.ndarray, last_qpos: np.ndarray):
- qpos = np.zeros(self.num_joints)
- qpos[self.idx_pin2fixed] = fixed_qpos
- torch_target_vec = torch.as_tensor(target_vector) * self.scaling
- torch_target_vec.requires_grad_(False)
-
- def objective(x: np.ndarray, grad: np.ndarray) -> float:
- qpos[self.idx_pin2target] = x
-
- # Kinematics forwarding for qpos
- if self.adaptor is not None:
- qpos[:] = self.adaptor.forward_qpos(qpos)[:]
-
- self.robot.compute_forward_kinematics(qpos)
- target_link_poses = [self.robot.get_link_pose(index) for index in self.computed_link_indices]
- body_pos = np.array([pose[:3, 3] for pose in target_link_poses])
-
- # Torch computation for accurate loss and grad
- torch_body_pos = torch.as_tensor(body_pos)
- torch_body_pos.requires_grad_()
-
- # Index link for computation
- origin_link_pos = torch_body_pos[self.origin_link_indices, :]
- task_link_pos = torch_body_pos[self.task_link_indices, :]
- robot_vec = task_link_pos - origin_link_pos
-
- # Loss term for kinematics retargeting based on 3D position error
- vec_dist = torch.norm(robot_vec - torch_target_vec, dim=1, keepdim=False)
- huber_distance = self.huber_loss(vec_dist, torch.zeros_like(vec_dist))
- result = huber_distance.cpu().detach().item()
-
- if grad.size > 0:
- jacobians = []
- for i, index in enumerate(self.computed_link_indices):
- link_body_jacobian = self.robot.compute_single_link_local_jacobian(qpos, index)[:3, ...]
- link_pose = target_link_poses[i]
- link_rot = link_pose[:3, :3]
- link_kinematics_jacobian = link_rot @ link_body_jacobian
- jacobians.append(link_kinematics_jacobian)
-
- # Note: the joint order in this jacobian is consistent pinocchio
- jacobians = np.stack(jacobians, axis=0)
- huber_distance.backward()
- grad_pos = torch_body_pos.grad.cpu().numpy()[:, None, :]
-
- # Convert the jacobian from pinocchio order to target order
- if self.adaptor is not None:
- jacobians = self.adaptor.backward_jacobian(jacobians)
- else:
- jacobians = jacobians[..., self.idx_pin2target]
-
- grad_qpos = np.matmul(grad_pos, np.array(jacobians))
- grad_qpos = grad_qpos.mean(1).sum(0)
- grad_qpos += 2 * self.norm_delta * (x - last_qpos)
-
- grad[:] = grad_qpos[:]
-
- return result
-
- return objective
-
-
-class DexPilotOptimizer(Optimizer):
- """Retargeting optimizer using the method proposed in DexPilot
-
- This is a broader adaptation of the original optimizer delineated in the DexPilot paper.
- While the initial DexPilot study focused solely on the four-fingered Allegro Hand, this version of the optimizer
- embraces the same principles for both four-fingered and five-fingered hands. It projects the distance between the
- thumb and the other fingers to facilitate more stable grasping.
- Reference: https://arxiv.org/abs/1910.03135
-
- Args:
- robot:
- target_joint_names:
- finger_tip_link_names:
- wrist_link_name:
- gamma:
- project_dist:
- escape_dist:
- eta1:
- eta2:
- scaling:
- """
-
- retargeting_type = "DEXPILOT"
-
- def __init__(
- self,
- robot: RobotWrapper,
- target_joint_names: List[str],
- finger_tip_link_names: List[str],
- wrist_link_name: str,
- target_link_human_indices: Optional[np.ndarray] = None,
- huber_delta=0.03,
- norm_delta=4e-3,
- # DexPilot parameters
- # gamma=2.5e-3,
- project_dist=0.03,
- escape_dist=0.05,
- eta1=1e-4,
- eta2=3e-2,
- scaling=1.0,
- ):
- if len(finger_tip_link_names) < 2 or len(finger_tip_link_names) > 5:
- raise ValueError(
- f"DexPilot optimizer can only be applied to hands with 2 to 5 fingers, but got "
- f"{len(finger_tip_link_names)} fingers."
- )
- self.num_fingers = len(finger_tip_link_names)
-
- origin_link_index, task_link_index = self.generate_link_indices(self.num_fingers)
-
- if target_link_human_indices is None:
- logical_indices = np.stack([origin_link_index, task_link_index], axis=0)
- target_link_human_indices = np.where(
- logical_indices > 0,
- logical_indices * 5 - 1,
- 0
- ).astype(int)
- link_names = [wrist_link_name] + finger_tip_link_names
- target_origin_link_names = [link_names[index] for index in origin_link_index]
- target_task_link_names = [link_names[index] for index in task_link_index]
-
- super().__init__(robot, target_joint_names, target_link_human_indices)
- self.origin_link_names = target_origin_link_names
- self.task_link_names = target_task_link_names
- self.scaling = scaling
- self.huber_loss = torch.nn.SmoothL1Loss(beta=huber_delta, reduction="none")
- self.norm_delta = norm_delta
-
- # DexPilot parameters
- self.project_dist = project_dist
- self.escape_dist = escape_dist
- self.eta1 = eta1
- self.eta2 = eta2
-
- # Computation cache for better performance
- # For one link used in multiple vectors, e.g. hand palm, we do not want to compute it multiple times
- self.computed_link_names = list(set(target_origin_link_names).union(set(target_task_link_names)))
- self.origin_link_indices = torch.tensor(
- [self.computed_link_names.index(name) for name in target_origin_link_names]
- )
- self.task_link_indices = torch.tensor([self.computed_link_names.index(name) for name in target_task_link_names])
-
- # Sanity check and cache link indices
- self.computed_link_indices = self.get_link_indices(self.computed_link_names)
-
- self.opt.set_ftol_abs(1e-6)
-
- # DexPilot cache
- self.projected, self.s2_project_index_origin, self.s2_project_index_task, self.projected_dist = (
- self.set_dexpilot_cache(self.num_fingers, eta1, eta2)
- )
-
- @staticmethod
- def generate_link_indices(num_fingers):
- """
- Example:
- >>> generate_link_indices(4)
- ([2, 3, 4, 3, 4, 4, 0, 0, 0, 0], [1, 1, 1, 2, 2, 3, 1, 2, 3, 4])
- """
- origin_link_index = []
- task_link_index = []
-
- # S1:Add indices for connections between fingers
- for i in range(1, num_fingers):
- for j in range(i + 1, num_fingers + 1):
- origin_link_index.append(j)
- task_link_index.append(i)
-
- # S2:Add indices for connections to the base (0)
- for i in range(1, num_fingers + 1):
- origin_link_index.append(0)
- task_link_index.append(i)
-
- return origin_link_index, task_link_index
-
- @staticmethod
- def set_dexpilot_cache(num_fingers, eta1, eta2):
- """
- Example:
- >>> set_dexpilot_cache(4, 0.1, 0.2)
- (array([False, False, False, False, False, False]),
- [1, 2, 2],
- [0, 0, 1],
- array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2]))
- """
- projected = np.zeros(num_fingers * (num_fingers - 1) // 2, dtype=bool)
-
- s2_project_index_origin = []
- s2_project_index_task = []
- for i in range(0, num_fingers - 2):
- for j in range(i + 1, num_fingers - 1):
- s2_project_index_origin.append(j)
- s2_project_index_task.append(i)
-
- projected_dist = np.array([eta1] * (num_fingers - 1) + [eta2] * ((num_fingers - 1) * (num_fingers - 2) // 2))
-
- return projected, s2_project_index_origin, s2_project_index_task, projected_dist
-
- def get_objective_function(self, target_vector: np.ndarray, fixed_qpos: np.ndarray, last_qpos: np.ndarray):
- qpos = np.zeros(self.num_joints)
- qpos[self.idx_pin2fixed] = fixed_qpos
-
- len_proj = len(self.projected)
- len_s2 = len(self.s2_project_index_task)
- len_s1 = len_proj - len_s2
-
- # Update projection indicator
- target_vec_dist = np.linalg.norm(target_vector[:len_proj], axis=1)
- self.projected[:len_s1][target_vec_dist[0:len_s1] < self.project_dist] = True
- self.projected[:len_s1][target_vec_dist[0:len_s1] > self.escape_dist] = False
- self.projected[len_s1:len_proj] = np.logical_and(
- self.projected[:len_s1][self.s2_project_index_origin], self.projected[:len_s1][self.s2_project_index_task]
- )
- self.projected[len_s1:len_proj] = np.logical_and(
- self.projected[len_s1:len_proj], target_vec_dist[len_s1:len_proj] <= 0.03
- )
-
- # Update weight vector
- normal_weight = np.ones(len_proj, dtype=np.float32) * 1
- high_weight = np.array([200] * len_s1 + [400] * len_s2, dtype=np.float32)
- weight = np.where(self.projected, high_weight, normal_weight)
-
- # We change the weight to 10 instead of 1 here, for vector originate from wrist to fingertips
- # This ensures better intuitive mapping due wrong pose detection
- weight = torch.from_numpy(
- np.concatenate([weight, np.ones(self.num_fingers, dtype=np.float32) * len_proj + self.num_fingers])
- )
-
- # Compute reference distance vector
- normal_vec = target_vector * self.scaling # (10, 3)
- dir_vec = target_vector[:len_proj] / (target_vec_dist[:, None] + 1e-6) # (6, 3)
- projected_vec = dir_vec * self.projected_dist[:, None] # (6, 3)
-
- # Compute final reference vector
- reference_vec = np.where(self.projected[:, None], projected_vec, normal_vec[:len_proj]) # (6, 3)
- reference_vec = np.concatenate([reference_vec, normal_vec[len_proj:]], axis=0) # (10, 3)
- torch_target_vec = torch.as_tensor(reference_vec, dtype=torch.float32)
- torch_target_vec.requires_grad_(False)
-
- def objective(x: np.ndarray, grad: np.ndarray) -> float:
- qpos[self.idx_pin2target] = x
-
- # Kinematics forwarding for qpos
- if self.adaptor is not None:
- qpos[:] = self.adaptor.forward_qpos(qpos)[:]
-
- self.robot.compute_forward_kinematics(qpos)
- target_link_poses = [self.robot.get_link_pose(index) for index in self.computed_link_indices]
- body_pos = np.array([pose[:3, 3] for pose in target_link_poses])
-
- # Torch computation for accurate loss and grad
- torch_body_pos = torch.as_tensor(body_pos)
- torch_body_pos.requires_grad_()
-
- # Index link for computation
- origin_link_pos = torch_body_pos[self.origin_link_indices, :]
- task_link_pos = torch_body_pos[self.task_link_indices, :]
- robot_vec = task_link_pos - origin_link_pos
-
- # Loss term for kinematics retargeting based on 3D position error
- # Different from the original DexPilot, we use huber loss here instead of the squared dist
- vec_dist = torch.norm(robot_vec - torch_target_vec, dim=1, keepdim=False)
- huber_distance = (
- self.huber_loss(vec_dist, torch.zeros_like(vec_dist)) * weight / (robot_vec.shape[0])
- ).sum()
- huber_distance = huber_distance.sum()
- result = huber_distance.cpu().detach().item()
-
- if grad.size > 0:
- jacobians = []
- for i, index in enumerate(self.computed_link_indices):
- link_body_jacobian = self.robot.compute_single_link_local_jacobian(qpos, index)[:3, ...]
- link_pose = target_link_poses[i]
- link_rot = link_pose[:3, :3]
- link_kinematics_jacobian = link_rot @ link_body_jacobian
- jacobians.append(link_kinematics_jacobian)
-
- # Note: the joint order in this jacobian is consistent pinocchio
- jacobians = np.stack(jacobians, axis=0)
- huber_distance.backward()
- grad_pos = torch_body_pos.grad.cpu().numpy()[:, None, :]
-
- # Convert the jacobian from pinocchio order to target order
- if self.adaptor is not None:
- jacobians = self.adaptor.backward_jacobian(jacobians)
- else:
- jacobians = jacobians[..., self.idx_pin2target]
-
- grad_qpos = np.matmul(grad_pos, np.array(jacobians))
- grad_qpos = grad_qpos.mean(1).sum(0)
-
- # In the original DexPilot, γ = 2.5 × 10−3 is a weight on regularizing the Allegro angles to zero
- # which is equivalent to fully opened the hand
- # In our implementation, we regularize the joint angles to the previous joint angles
- grad_qpos += 2 * self.norm_delta * (x - last_qpos)
-
- grad[:] = grad_qpos[:]
-
- return result
-
- return objective
diff --git a/teleop/robot_control/dex_retargeting/optimizer_utils.py b/teleop/robot_control/dex_retargeting/optimizer_utils.py
deleted file mode 100644
index 2b54275..0000000
--- a/teleop/robot_control/dex_retargeting/optimizer_utils.py
+++ /dev/null
@@ -1,17 +0,0 @@
-class LPFilter:
- def __init__(self, alpha):
- self.alpha = alpha
- self.y = None
- self.is_init = False
-
- def next(self, x):
- if not self.is_init:
- self.y = x
- self.is_init = True
- return self.y.copy()
- self.y = self.y + self.alpha * (x - self.y)
- return self.y.copy()
-
- def reset(self):
- self.y = None
- self.is_init = False
diff --git a/teleop/robot_control/dex_retargeting/retargeting_config.py b/teleop/robot_control/dex_retargeting/retargeting_config.py
deleted file mode 100644
index f1e8cd8..0000000
--- a/teleop/robot_control/dex_retargeting/retargeting_config.py
+++ /dev/null
@@ -1,255 +0,0 @@
-from dataclasses import dataclass
-from pathlib import Path
-from typing import List, Optional, Dict, Any, Tuple
-from typing import Union
-
-import numpy as np
-import yaml
-import os
-
-from . import yourdfpy as urdf
-from .kinematics_adaptor import MimicJointKinematicAdaptor
-from .optimizer_utils import LPFilter
-from .robot_wrapper import RobotWrapper
-from .seq_retarget import SeqRetargeting
-from .yourdfpy import DUMMY_JOINT_NAMES
-
-
-@dataclass
-class RetargetingConfig:
- type: str
- urdf_path: str
- target_joint_names: Optional[List[str]] = None
-
- # Whether to add free joint to the root of the robot. Free joint enable the robot hand move freely in the space
- add_dummy_free_joint: bool = False
-
- # DexPilot retargeting related
- # The link on the robot hand which corresponding to the wrist of human hand
- wrist_link_name: Optional[str] = None
- # DexPilot retargeting link names
- finger_tip_link_names: Optional[List[str]] = None
- target_link_human_indices_dexpilot: Optional[np.ndarray] = None
-
- # Position retargeting link names
- target_link_names: Optional[List[str]] = None
- target_link_human_indices_position: Optional[np.ndarray] = None
-
- # Vector retargeting link names
- target_origin_link_names: Optional[List[str]] = None
- target_task_link_names: Optional[List[str]] = None
- target_link_human_indices_vector: Optional[np.ndarray] = None
-
- # Scaling factor for vector retargeting only
- # For example, Allegro is 1.6 times larger than normal human hand, then this scaling factor should be 1.6
- scaling_factor: float = 1.0
-
- # Low pass filter
- low_pass_alpha: float = 0.1
-
- # Optimization parameters
- normal_delta: float = 4e-3
- huber_delta: float = 2e-2
-
- # DexPilot optimizer parameters
- project_dist: float = 0.03
- escape_dist: float = 0.05
-
- # Joint limit tag
- has_joint_limits: bool = True
-
- # Mimic joint tag
- ignore_mimic_joint: bool = False
-
- _TYPE = ["vector", "position", "dexpilot"]
- _DEFAULT_URDF_DIR = "./"
-
- def __post_init__(self):
- # Retargeting type check
- self.type = self.type.lower()
- if self.type not in self._TYPE:
- raise ValueError(f"Retargeting type must be one of {self._TYPE}")
-
- # Vector retargeting requires: target_origin_link_names + target_task_link_names
- # Position retargeting requires: target_link_names
- if self.type == "vector":
- if self.target_origin_link_names is None or self.target_task_link_names is None:
- raise ValueError(f"Vector retargeting requires: target_origin_link_names + target_task_link_names")
- if len(self.target_task_link_names) != len(self.target_origin_link_names):
- raise ValueError(f"Vector retargeting origin and task links dim mismatch")
- if self.target_link_human_indices_vector.shape != (2, len(self.target_origin_link_names)):
- raise ValueError(f"Vector retargeting link names and link indices dim mismatch")
- if self.target_link_human_indices_vector is None:
- raise ValueError(f"Vector retargeting requires: target_link_human_indices_vector")
-
- elif self.type == "position":
- if self.target_link_names is None:
- raise ValueError(f"Position retargeting requires: target_link_names")
- self.target_link_human_indices_position = self.target_link_human_indices_position.squeeze()
- if self.target_link_human_indices_position.shape != (len(self.target_link_names),):
- raise ValueError(f"Position retargeting link names and link indices dim mismatch")
- if self.target_link_human_indices_position is None:
- raise ValueError(f"Position retargeting requires: target_link_human_indices_position")
-
- elif self.type == "dexpilot":
- if self.finger_tip_link_names is None or self.wrist_link_name is None:
- raise ValueError(f"Position retargeting requires: finger_tip_link_names + wrist_link_name")
- if self.target_link_human_indices_dexpilot is not None:
- print(
- "\033[33m",
- "Target link human indices is provided in the DexPilot retargeting config, which is uncommon.\n"
- "If you do not know exactly how it is used, please leave it to None for default.\n"
- "\033[00m",
- )
-
- # URDF path check
- urdf_path = Path(self.urdf_path)
- if not urdf_path.is_absolute():
- urdf_path = self._DEFAULT_URDF_DIR / urdf_path
- urdf_path = urdf_path.absolute()
- if not urdf_path.exists():
- raise ValueError(f"URDF path {urdf_path} does not exist")
- self.urdf_path = str(urdf_path)
-
- @classmethod
- def set_default_urdf_dir(cls, urdf_dir: Union[str, Path]):
- path = Path(urdf_dir)
- if not path.exists():
- raise ValueError(f"URDF dir {urdf_dir} not exists.")
- cls._DEFAULT_URDF_DIR = urdf_dir
-
- @classmethod
- def load_from_file(cls, config_path: Union[str, Path], override: Optional[Dict] = None):
- path = Path(config_path)
- if not path.is_absolute():
- path = path.absolute()
-
- with path.open("r") as f:
- yaml_config = yaml.load(f, Loader=yaml.FullLoader)
- cfg = yaml_config["retargeting"]
- return cls.from_dict(cfg, override)
-
- @classmethod
- def from_dict(cls, cfg: Dict[str, Any], override: Optional[Dict] = None):
- if "target_link_human_indices_position" in cfg:
- cfg["target_link_human_indices_position"] = np.array(cfg["target_link_human_indices_position"])
- if "target_link_human_indices_vector" in cfg:
- cfg["target_link_human_indices_vector"] = np.array(cfg["target_link_human_indices_vector"])
- if "target_link_human_indices_dexpilot" in cfg:
- cfg["target_link_human_indices_dexpilot"] = np.array(cfg["target_link_human_indices_dexpilot"])
-
- if override is not None:
- for key, value in override.items():
- cfg[key] = value
- config = RetargetingConfig(**cfg)
- return config
-
- def build(self) -> SeqRetargeting:
- from .optimizer import (
- VectorOptimizer,
- PositionOptimizer,
- DexPilotOptimizer,
- )
- import tempfile
-
- # Process the URDF with yourdfpy to better find file path
- robot_urdf = urdf.URDF.load(
- self.urdf_path, add_dummy_free_joints=self.add_dummy_free_joint, build_scene_graph=False
- )
- urdf_name = self.urdf_path.split(os.path.sep)[-1]
- temp_dir = tempfile.mkdtemp(prefix="dex_retargeting-")
- temp_path = f"{temp_dir}/{urdf_name}"
- robot_urdf.write_xml_file(temp_path)
-
- # Load pinocchio model
- robot = RobotWrapper(temp_path)
-
- # Add 6D dummy joint to target joint names so that it will also be optimized
- if self.add_dummy_free_joint and self.target_joint_names is not None:
- self.target_joint_names = DUMMY_JOINT_NAMES + self.target_joint_names
- joint_names = self.target_joint_names if self.target_joint_names is not None else robot.dof_joint_names
-
- if self.type == "position":
- optimizer = PositionOptimizer(
- robot,
- joint_names,
- target_link_names=self.target_link_names,
- target_link_human_indices=self.target_link_human_indices_position,
- norm_delta=self.normal_delta,
- huber_delta=self.huber_delta,
- )
- elif self.type == "vector":
- optimizer = VectorOptimizer(
- robot,
- joint_names,
- target_origin_link_names=self.target_origin_link_names,
- target_task_link_names=self.target_task_link_names,
- target_link_human_indices=self.target_link_human_indices_vector,
- scaling=self.scaling_factor,
- norm_delta=self.normal_delta,
- huber_delta=self.huber_delta,
- )
- elif self.type == "dexpilot":
- optimizer = DexPilotOptimizer(
- robot,
- joint_names,
- finger_tip_link_names=self.finger_tip_link_names,
- wrist_link_name=self.wrist_link_name,
- target_link_human_indices=self.target_link_human_indices_dexpilot,
- scaling=self.scaling_factor,
- project_dist=self.project_dist,
- escape_dist=self.escape_dist,
- )
- else:
- raise RuntimeError()
-
- if 0 <= self.low_pass_alpha <= 1:
- lp_filter = LPFilter(self.low_pass_alpha)
- else:
- lp_filter = None
-
- # Parse mimic joints and set kinematics adaptor for optimizer
- has_mimic_joints, source_names, mimic_names, multipliers, offsets = parse_mimic_joint(robot_urdf)
- if has_mimic_joints and not self.ignore_mimic_joint:
- adaptor = MimicJointKinematicAdaptor(
- robot,
- target_joint_names=joint_names,
- source_joint_names=source_names,
- mimic_joint_names=mimic_names,
- multipliers=multipliers,
- offsets=offsets,
- )
- optimizer.set_kinematic_adaptor(adaptor)
- print(
- "\033[34m",
- "Mimic joint adaptor enabled. The mimic joint tags in the URDF will be considered during retargeting.\n"
- "To disable mimic joint adaptor, consider setting ignore_mimic_joint=True in the configuration.",
- "\033[39m",
- )
-
- retargeting = SeqRetargeting(
- optimizer,
- has_joint_limits=self.has_joint_limits,
- lp_filter=lp_filter,
- )
- return retargeting
-
-
-def get_retargeting_config(config_path: Union[str, Path]) -> RetargetingConfig:
- config = RetargetingConfig.load_from_file(config_path)
- return config
-
-
-def parse_mimic_joint(robot_urdf: urdf.URDF) -> Tuple[bool, List[str], List[str], List[float], List[float]]:
- mimic_joint_names = []
- source_joint_names = []
- multipliers = []
- offsets = []
- for name, joint in robot_urdf.joint_map.items():
- if joint.mimic is not None:
- mimic_joint_names.append(name)
- source_joint_names.append(joint.mimic.joint)
- multipliers.append(joint.mimic.multiplier)
- offsets.append(joint.mimic.offset)
-
- return len(mimic_joint_names) > 0, source_joint_names, mimic_joint_names, multipliers, offsets
diff --git a/teleop/robot_control/dex_retargeting/robot_wrapper.py b/teleop/robot_control/dex_retargeting/robot_wrapper.py
deleted file mode 100644
index 335fb43..0000000
--- a/teleop/robot_control/dex_retargeting/robot_wrapper.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from typing import List
-
-import numpy as np
-import numpy.typing as npt
-import pinocchio as pin
-
-
-class RobotWrapper:
- """
- This class does not take mimic joint into consideration
- """
-
- def __init__(self, urdf_path: str, use_collision=False, use_visual=False):
- # Create robot model and data
- self.model: pin.Model = pin.buildModelFromUrdf(urdf_path)
- self.data: pin.Data = self.model.createData()
-
- if use_visual or use_collision:
- raise NotImplementedError
-
- self.q0 = pin.neutral(self.model)
- if self.model.nv != self.model.nq:
- raise NotImplementedError(f"Can not handle robot with special joint.")
-
- # -------------------------------------------------------------------------- #
- # Robot property
- # -------------------------------------------------------------------------- #
- @property
- def joint_names(self) -> List[str]:
- return list(self.model.names)
-
- @property
- def dof_joint_names(self) -> List[str]:
- nqs = self.model.nqs
- return [name for i, name in enumerate(self.model.names) if nqs[i] > 0]
-
- @property
- def dof(self) -> int:
- return self.model.nq
-
- @property
- def link_names(self) -> List[str]:
- link_names = []
- for i, frame in enumerate(self.model.frames):
- link_names.append(frame.name)
- return link_names
-
- @property
- def joint_limits(self):
- lower = self.model.lowerPositionLimit
- upper = self.model.upperPositionLimit
- return np.stack([lower, upper], axis=1)
-
- # -------------------------------------------------------------------------- #
- # Query function
- # -------------------------------------------------------------------------- #
- def get_joint_index(self, name: str):
- return self.dof_joint_names.index(name)
-
- def get_link_index(self, name: str):
- if name not in self.link_names:
- raise ValueError(f"{name} is not a link name. Valid link names: \n{self.link_names}")
- return self.model.getFrameId(name, pin.BODY)
-
- def get_joint_parent_child_frames(self, joint_name: str):
- joint_id = self.model.getFrameId(joint_name)
- parent_id = self.model.frames[joint_id].parent
- child_id = -1
- for idx, frame in enumerate(self.model.frames):
- if frame.previousFrame == joint_id:
- child_id = idx
- if child_id == -1:
- raise ValueError(f"Can not find child link of {joint_name}")
-
- return parent_id, child_id
-
- # -------------------------------------------------------------------------- #
- # Kinematics function
- # -------------------------------------------------------------------------- #
- def compute_forward_kinematics(self, qpos: npt.NDArray):
- pin.forwardKinematics(self.model, self.data, qpos)
-
- def get_link_pose(self, link_id: int) -> npt.NDArray:
- pose: pin.SE3 = pin.updateFramePlacement(self.model, self.data, link_id)
- return pose.homogeneous
-
- def get_link_pose_inv(self, link_id: int) -> npt.NDArray:
- pose: pin.SE3 = pin.updateFramePlacement(self.model, self.data, link_id)
- return pose.inverse().homogeneous
-
- def compute_single_link_local_jacobian(self, qpos, link_id: int) -> npt.NDArray:
- J = pin.computeFrameJacobian(self.model, self.data, qpos, link_id)
- return J
diff --git a/teleop/robot_control/dex_retargeting/seq_retarget.py b/teleop/robot_control/dex_retargeting/seq_retarget.py
deleted file mode 100644
index a24529c..0000000
--- a/teleop/robot_control/dex_retargeting/seq_retarget.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import time
-from typing import Optional
-
-import numpy as np
-from pytransform3d import rotations
-
-from .constants import OPERATOR2MANO, HandType
-from .optimizer import Optimizer
-from .optimizer_utils import LPFilter
-
-
-class SeqRetargeting:
- def __init__(
- self,
- optimizer: Optimizer,
- has_joint_limits=True,
- lp_filter: Optional[LPFilter] = None,
- ):
- self.optimizer = optimizer
- robot = self.optimizer.robot
-
- # Joint limit
- self.has_joint_limits = has_joint_limits
- joint_limits = np.ones_like(robot.joint_limits)
- joint_limits[:, 0] = -1e4 # a large value is equivalent to no limit
- joint_limits[:, 1] = 1e4
- if has_joint_limits:
- joint_limits[:] = robot.joint_limits[:]
- self.optimizer.set_joint_limit(joint_limits[self.optimizer.idx_pin2target])
- self.joint_limits = joint_limits[self.optimizer.idx_pin2target]
-
- # Temporal information
- self.last_qpos = joint_limits.mean(1)[self.optimizer.idx_pin2target].astype(np.float32)
- self.accumulated_time = 0
- self.num_retargeting = 0
-
- # Filter
- self.filter = lp_filter
-
- # Warm started
- self.is_warm_started = False
-
- def warm_start(
- self,
- wrist_pos: np.ndarray,
- wrist_quat: np.ndarray,
- hand_type: HandType = HandType.right,
- is_mano_convention: bool = False,
- ):
- """
- Initialize the wrist joint pose using analytical computation instead of retargeting optimization.
- This function is specifically for position retargeting with the flying robot hand, i.e. has 6D free joint
- You are not expected to use this function for vector retargeting, e.g. when you are working on teleoperation
-
- Args:
- wrist_pos: position of the hand wrist, typically from human hand pose
- wrist_quat: quaternion of the hand wrist, the same convention as the operator frame definition if not is_mano_convention
- hand_type: hand type, used to determine the operator2mano matrix
- is_mano_convention: whether the wrist_quat is in mano convention
- """
- # This function can only be used when the first joints of robot are free joints
-
- if len(wrist_pos) != 3:
- raise ValueError(f"Wrist pos: {wrist_pos} is not a 3-dim vector.")
- if len(wrist_quat) != 4:
- raise ValueError(f"Wrist quat: {wrist_quat} is not a 4-dim vector.")
-
- operator2mano = OPERATOR2MANO[hand_type] if is_mano_convention else np.eye(3)
- robot = self.optimizer.robot
- target_wrist_pose = np.eye(4)
- target_wrist_pose[:3, :3] = rotations.matrix_from_quaternion(wrist_quat) @ operator2mano.T
- target_wrist_pose[:3, 3] = wrist_pos
-
- name_list = [
- "dummy_x_translation_joint",
- "dummy_y_translation_joint",
- "dummy_z_translation_joint",
- "dummy_x_rotation_joint",
- "dummy_y_rotation_joint",
- "dummy_z_rotation_joint",
- ]
- wrist_link_id = robot.get_joint_parent_child_frames(name_list[5])[1]
-
- # Set the dummy joints angles to zero
- old_qpos = robot.q0
- new_qpos = old_qpos.copy()
- for num, joint_name in enumerate(self.optimizer.target_joint_names):
- if joint_name in name_list:
- new_qpos[num] = 0
-
- robot.compute_forward_kinematics(new_qpos)
- root2wrist = robot.get_link_pose_inv(wrist_link_id)
- target_root_pose = target_wrist_pose @ root2wrist
-
- euler = rotations.euler_from_matrix(target_root_pose[:3, :3], 0, 1, 2, extrinsic=False)
- pose_vec = np.concatenate([target_root_pose[:3, 3], euler])
-
- # Find the dummy joints
- for num, joint_name in enumerate(self.optimizer.target_joint_names):
- if joint_name in name_list:
- index = name_list.index(joint_name)
- self.last_qpos[num] = pose_vec[index]
-
- self.is_warm_started = True
-
- def retarget(self, ref_value, fixed_qpos=np.array([])):
- tic = time.perf_counter()
-
- qpos = self.optimizer.retarget(
- ref_value=ref_value.astype(np.float32),
- fixed_qpos=fixed_qpos.astype(np.float32),
- last_qpos=np.clip(self.last_qpos, self.joint_limits[:, 0], self.joint_limits[:, 1]),
- )
- self.accumulated_time += time.perf_counter() - tic
- self.num_retargeting += 1
- self.last_qpos = qpos
- robot_qpos = np.zeros(self.optimizer.robot.dof)
- robot_qpos[self.optimizer.idx_pin2fixed] = fixed_qpos
- robot_qpos[self.optimizer.idx_pin2target] = qpos
-
- if self.optimizer.adaptor is not None:
- robot_qpos = self.optimizer.adaptor.forward_qpos(robot_qpos)
-
- if self.filter is not None:
- robot_qpos = self.filter.next(robot_qpos)
- return robot_qpos
-
- def set_qpos(self, robot_qpos: np.ndarray):
- target_qpos = robot_qpos[self.optimizer.idx_pin2target]
- self.last_qpos = target_qpos
-
- def get_qpos(self, fixed_qpos: Optional[np.ndarray] = None):
- robot_qpos = np.zeros(self.optimizer.robot.dof)
- robot_qpos[self.optimizer.idx_pin2target] = self.last_qpos
- if fixed_qpos is not None:
- robot_qpos[self.optimizer.idx_pin2fixed] = fixed_qpos
- return robot_qpos
-
- def verbose(self):
- min_value = self.optimizer.opt.last_optimum_value()
- print(f"Retargeting {self.num_retargeting} times takes: {self.accumulated_time}s")
- print(f"Last distance: {min_value}")
-
- def reset(self):
- self.last_qpos = self.joint_limits.mean(1).astype(np.float32)
- self.num_retargeting = 0
- self.accumulated_time = 0
-
- @property
- def joint_names(self):
- return self.optimizer.robot.dof_joint_names
diff --git a/teleop/robot_control/dex_retargeting/yourdfpy.py b/teleop/robot_control/dex_retargeting/yourdfpy.py
deleted file mode 100644
index ce8e287..0000000
--- a/teleop/robot_control/dex_retargeting/yourdfpy.py
+++ /dev/null
@@ -1,2237 +0,0 @@
-# Code from yourdfpy with small modification for deprecated warning
-# Source: https://github.com/clemense/yourdfpy/blob/main/src/yourdfpy/urdf.py
-
-import copy
-import logging
-import os
-from dataclasses import dataclass, field, is_dataclass
-from functools import partial
-from typing import Dict, List, Optional, Union
-
-import anytree
-import numpy as np
-import six
-import trimesh
-import trimesh.transformations as tra
-from anytree import Node, LevelOrderIter
-from lxml import etree
-
-_logger = logging.getLogger(__name__)
-
-
-def _array_eq(arr1, arr2):
- if arr1 is None and arr2 is None:
- return True
- return (
- isinstance(arr1, np.ndarray)
- and isinstance(arr2, np.ndarray)
- and arr1.shape == arr2.shape
- and (arr1 == arr2).all()
- )
-
-
-@dataclass(eq=False)
-class TransmissionJoint:
- name: str
- hardware_interfaces: List[str] = field(default_factory=list)
-
- def __eq__(self, other):
- if not isinstance(other, TransmissionJoint):
- return NotImplemented
- return (
- self.name == other.name
- and all(self_hi in other.hardware_interfaces for self_hi in self.hardware_interfaces)
- and all(other_hi in self.hardware_interfaces for other_hi in other.hardware_interfaces)
- )
-
-
-@dataclass(eq=False)
-class Actuator:
- name: str
- mechanical_reduction: Optional[float] = None
- # The follwing is only valid for ROS Indigo and prior versions
- hardware_interfaces: List[str] = field(default_factory=list)
-
- def __eq__(self, other):
- if not isinstance(other, Actuator):
- return NotImplemented
- return (
- self.name == other.name
- and self.mechanical_reduction == other.mechanical_reduction
- and all(self_hi in other.hardware_interfaces for self_hi in self.hardware_interfaces)
- and all(other_hi in self.hardware_interfaces for other_hi in other.hardware_interfaces)
- )
-
-
-@dataclass(eq=False)
-class Transmission:
- name: str
- type: Optional[str] = None
- joints: List[TransmissionJoint] = field(default_factory=list)
- actuators: List[Actuator] = field(default_factory=list)
-
- def __eq__(self, other):
- if not isinstance(other, Transmission):
- return NotImplemented
- return (
- self.name == other.name
- and self.type == other.type
- and all(self_joint in other.joints for self_joint in self.joints)
- and all(other_joint in self.joints for other_joint in other.joints)
- and all(self_actuator in other.actuators for self_actuator in self.actuators)
- and all(other_actuator in self.actuators for other_actuator in other.actuators)
- )
-
-
-@dataclass
-class Calibration:
- rising: Optional[float] = None
- falling: Optional[float] = None
-
-
-@dataclass
-class Mimic:
- joint: str
- multiplier: Optional[float] = None
- offset: Optional[float] = None
-
-
-@dataclass
-class SafetyController:
- soft_lower_limit: Optional[float] = None
- soft_upper_limit: Optional[float] = None
- k_position: Optional[float] = None
- k_velocity: Optional[float] = None
-
-
-@dataclass
-class Sphere:
- radius: float
-
-
-@dataclass
-class Cylinder:
- radius: float
- length: float
-
-
-@dataclass(eq=False)
-class Box:
- size: np.ndarray
-
- def __eq__(self, other):
- if not isinstance(other, Box):
- return NotImplemented
- return _array_eq(self.size, other.size)
-
-
-@dataclass(eq=False)
-class Mesh:
- filename: str
- scale: Optional[Union[float, np.ndarray]] = None
-
- def __eq__(self, other):
- if not isinstance(other, Mesh):
- return NotImplemented
-
- if self.filename != other.filename:
- return False
-
- if isinstance(self.scale, float) and isinstance(other.scale, float):
- return self.scale == other.scale
-
- return _array_eq(self.scale, other.scale)
-
-
-@dataclass
-class Geometry:
- box: Optional[Box] = None
- cylinder: Optional[Cylinder] = None
- sphere: Optional[Sphere] = None
- mesh: Optional[Mesh] = None
-
-
-@dataclass(eq=False)
-class Color:
- rgba: np.ndarray
-
- def __eq__(self, other):
- if not isinstance(other, Color):
- return NotImplemented
- return _array_eq(self.rgba, other.rgba)
-
-
-@dataclass
-class Texture:
- filename: str
-
-
-@dataclass
-class Material:
- name: Optional[str] = None
- color: Optional[Color] = None
- texture: Optional[Texture] = None
-
-
-@dataclass(eq=False)
-class Visual:
- name: Optional[str] = None
- origin: Optional[np.ndarray] = None
- geometry: Optional[Geometry] = None # That's not really optional according to ROS
- material: Optional[Material] = None
-
- def __eq__(self, other):
- if not isinstance(other, Visual):
- return NotImplemented
- return (
- self.name == other.name
- and _array_eq(self.origin, other.origin)
- and self.geometry == other.geometry
- and self.material == other.material
- )
-
-
-@dataclass(eq=False)
-class Collision:
- name: str
- origin: Optional[np.ndarray] = None
- geometry: Geometry = None
-
- def __eq__(self, other):
- if not isinstance(other, Collision):
- return NotImplemented
- return self.name == other.name and _array_eq(self.origin, other.origin) and self.geometry == other.geometry
-
-
-@dataclass(eq=False)
-class Inertial:
- origin: Optional[np.ndarray] = None
- mass: Optional[float] = None
- inertia: Optional[np.ndarray] = None
-
- def __eq__(self, other):
- if not isinstance(other, Inertial):
- return NotImplemented
- return (
- _array_eq(self.origin, other.origin) and self.mass == other.mass and _array_eq(self.inertia, other.inertia)
- )
-
-
-@dataclass(eq=False)
-class Link:
- name: str
- inertial: Optional[Inertial] = None
- visuals: List[Visual] = field(default_factory=list)
- collisions: List[Collision] = field(default_factory=list)
-
- def __eq__(self, other):
- if not isinstance(other, Link):
- return NotImplemented
- return (
- self.name == other.name
- and self.inertial == other.inertial
- and all(self_visual in other.visuals for self_visual in self.visuals)
- and all(other_visual in self.visuals for other_visual in other.visuals)
- and all(self_collision in other.collisions for self_collision in self.collisions)
- and all(other_collision in self.collisions for other_collision in other.collisions)
- )
-
-
-@dataclass
-class Dynamics:
- damping: Optional[float] = None
- friction: Optional[float] = None
-
-
-@dataclass
-class Limit:
- effort: Optional[float] = None
- velocity: Optional[float] = None
- lower: Optional[float] = None
- upper: Optional[float] = None
-
-
-@dataclass(eq=False)
-class Joint:
- name: str
- type: str = None
- parent: str = None
- child: str = None
- origin: np.ndarray = None
- axis: np.ndarray = None
- dynamics: Optional[Dynamics] = None
- limit: Optional[Limit] = None
- mimic: Optional[Mimic] = None
- calibration: Optional[Calibration] = None
- safety_controller: Optional[SafetyController] = None
-
- def __eq__(self, other):
- if not isinstance(other, Joint):
- return NotImplemented
- return (
- self.name == other.name
- and self.type == other.type
- and self.parent == other.parent
- and self.child == other.child
- and _array_eq(self.origin, other.origin)
- and _array_eq(self.axis, other.axis)
- and self.dynamics == other.dynamics
- and self.limit == other.limit
- and self.mimic == other.mimic
- and self.calibration == other.calibration
- and self.safety_controller == other.safety_controller
- )
-
-
-@dataclass(eq=False)
-class Robot:
- name: str
- links: List[Link] = field(default_factory=list)
- joints: List[Joint] = field(default_factory=list)
- materials: List[Material] = field(default_factory=list)
- transmission: List[str] = field(default_factory=list)
- gazebo: List[str] = field(default_factory=list)
-
- def __eq__(self, other):
- if not isinstance(other, Robot):
- return NotImplemented
- return (
- self.name == other.name
- and all(self_link in other.links for self_link in self.links)
- and all(other_link in self.links for other_link in other.links)
- and all(self_joint in other.joints for self_joint in self.joints)
- and all(other_joint in self.joints for other_joint in other.joints)
- and all(self_material in other.materials for self_material in self.materials)
- and all(other_material in self.materials for other_material in other.materials)
- and all(self_transmission in other.transmission for self_transmission in self.transmission)
- and all(other_transmission in self.transmission for other_transmission in other.transmission)
- and all(self_gazebo in other.gazebo for self_gazebo in self.gazebo)
- and all(other_gazebo in self.gazebo for other_gazebo in other.gazebo)
- )
-
-
-class URDFError(Exception):
- """General URDF exception."""
-
- def __init__(self, msg):
- super(URDFError, self).__init__()
- self.msg = msg
-
- def __str__(self):
- return type(self).__name__ + ": " + self.msg
-
- def __repr__(self):
- return type(self).__name__ + '("' + self.msg + '")'
-
-
-class URDFIncompleteError(URDFError):
- """Raised when needed data for an object isn't there."""
-
- pass
-
-
-class URDFAttributeValueError(URDFError):
- """Raised when attribute value is not contained in the set of allowed values."""
-
- pass
-
-
-class URDFBrokenRefError(URDFError):
- """Raised when a referenced object is not found in the scope."""
-
- pass
-
-
-class URDFMalformedError(URDFError):
- """Raised when data is found to be corrupted in some way."""
-
- pass
-
-
-class URDFUnsupportedError(URDFError):
- """Raised when some unexpectedly unsupported feature is found."""
-
- pass
-
-
-class URDFSaveValidationError(URDFError):
- """Raised when XML validation fails when saving."""
-
- pass
-
-
-def _str2float(s):
- """Cast string to float if it is not None. Otherwise return None.
-
- Args:
- s (str): String to convert or None.
-
- Returns:
- str or NoneType: The converted string or None.
- """
- return float(s) if s is not None else None
-
-
-def apply_visual_color(
- geom: trimesh.Trimesh,
- visual: Visual,
- material_map: Dict[str, Material],
-) -> None:
- """Apply the color of the visual material to the mesh.
-
- Args:
- geom: Trimesh to color.
- visual: Visual description from XML.
- material_map: Dictionary mapping material names to their definitions.
- """
- if visual.material is None:
- return
-
- if visual.material.color is not None:
- color = visual.material.color
- elif visual.material.name is not None and visual.material.name in material_map:
- color = material_map[visual.material.name].color
- else:
- return
-
- if color is None:
- return
- if isinstance(geom.visual, trimesh.visual.ColorVisuals):
- geom.visual.face_colors[:] = [int(255 * channel) for channel in color.rgba]
-
-
-def filename_handler_null(fname):
- """A lazy filename handler that simply returns its input.
-
- Args:
- fname (str): A file name.
-
- Returns:
- str: Same file name.
- """
- return fname
-
-
-def filename_handler_ignore_directive(fname):
- """A filename handler that removes anything before (and including) '://'.
-
- Args:
- fname (str): A file name.
-
- Returns:
- str: The file name without the prefix.
- """
- if "://" in fname or ":\\\\" in fname:
- return ":".join(fname.split(":")[1:])[2:]
- return fname
-
-
-def filename_handler_ignore_directive_package(fname):
- """A filename handler that removes the 'package://' directive and the package it refers to.
- It subsequently calls filename_handler_ignore_directive, i.e., it removes any other directive.
-
- Args:
- fname (str): A file name.
-
- Returns:
- str: The file name without 'package://' and the package name.
- """
- if fname.startswith("package://"):
- string_length = len("package://")
- return os.path.join(*os.path.normpath(fname[string_length:]).split(os.path.sep)[1:])
- return filename_handler_ignore_directive(fname)
-
-
-def filename_handler_add_prefix(fname, prefix):
- """A filename handler that adds a prefix.
-
- Args:
- fname (str): A file name.
- prefix (str): A prefix.
-
- Returns:
- str: Prefix plus file name.
- """
- return prefix + fname
-
-
-def filename_handler_absolute2relative(fname, dir):
- """A filename handler that turns an absolute file name into a relative one.
-
- Args:
- fname (str): A file name.
- dir (str): A directory.
-
- Returns:
- str: The file name relative to the directory.
- """
- # TODO: that's not right
- if fname.startswith(dir):
- return fname[len(dir) :]
- return fname
-
-
-def filename_handler_relative(fname, dir):
- """A filename handler that joins a file name with a directory.
-
- Args:
- fname (str): A file name.
- dir (str): A directory.
-
- Returns:
- str: The directory joined with the file name.
- """
- return os.path.join(dir, filename_handler_ignore_directive_package(fname))
-
-
-def filename_handler_relative_to_urdf_file(fname, urdf_fname):
- return filename_handler_relative(fname, os.path.dirname(urdf_fname))
-
-
-def filename_handler_relative_to_urdf_file_recursive(fname, urdf_fname, level=0):
- if level == 0:
- return filename_handler_relative_to_urdf_file(fname, urdf_fname)
- return filename_handler_relative_to_urdf_file_recursive(fname, os.path.split(urdf_fname)[0], level=level - 1)
-
-
-def _create_filename_handlers_to_urdf_file_recursive(urdf_fname):
- return [
- partial(
- filename_handler_relative_to_urdf_file_recursive,
- urdf_fname=urdf_fname,
- level=i,
- )
- for i in range(len(os.path.normpath(urdf_fname).split(os.path.sep)))
- ]
-
-
-def filename_handler_meta(fname, filename_handlers):
- """A filename handler that calls other filename handlers until the resulting file name points to an existing file.
-
- Args:
- fname (str): A file name.
- filename_handlers (list(fn)): A list of function pointers to filename handlers.
-
- Returns:
- str: The resolved file name that points to an existing file or the input if none of the files exists.
- """
- for fn in filename_handlers:
- candidate_fname = fn(fname=fname)
- _logger.debug(f"Checking filename: {candidate_fname}")
- if os.path.isfile(candidate_fname):
- return candidate_fname
- _logger.warning(f"Unable to resolve filename: {fname}")
- return fname
-
-
-def filename_handler_magic(fname, dir):
- """A magic filename handler.
-
- Args:
- fname (str): A file name.
- dir (str): A directory.
-
- Returns:
- str: The file name that exists or the input if nothing is found.
- """
- return filename_handler_meta(
- fname=fname,
- filename_handlers=[
- partial(filename_handler_relative, dir=dir),
- filename_handler_ignore_directive,
- ]
- + _create_filename_handlers_to_urdf_file_recursive(urdf_fname=dir),
- )
-
-
-def validation_handler_strict(errors):
- """A validation handler that does not allow any errors.
-
- Args:
- errors (list[yourdfpy.URDFError]): List of errors.
-
- Returns:
- bool: Whether any errors were found.
- """
- return len(errors) == 0
-
-
-class URDF:
- def __init__(
- self,
- robot: Robot = None,
- build_scene_graph: bool = True,
- build_collision_scene_graph: bool = False,
- load_meshes: bool = True,
- load_collision_meshes: bool = False,
- filename_handler=None,
- mesh_dir: str = "",
- force_mesh: bool = False,
- force_collision_mesh: bool = True,
- build_tree: bool = False,
- ):
- """A URDF model.
-
- Args:
- robot (Robot): The robot model. Defaults to None.
- build_scene_graph (bool, optional): Wheter to build a scene graph to enable transformation queries and forward kinematics. Defaults to True.
- build_collision_scene_graph (bool, optional): Wheter to build a scene graph for elements. Defaults to False.
- load_meshes (bool, optional): Whether to load the meshes referenced in the elements. Defaults to True.
- load_collision_meshes (bool, optional): Whether to load the collision meshes referenced in the elements. Defaults to False.
- filename_handler ([type], optional): Any function f(in: str) -> str, that maps filenames in the URDF to actual resources. Can be used to customize treatment of `package://` directives or relative/absolute filenames. Defaults to None.
- mesh_dir (str, optional): A root directory used for loading meshes. Defaults to "".
- force_mesh (bool, optional): Each loaded geometry will be concatenated into a single one (instead of being turned into a graph; in case the underlying file contains multiple geometries). This might loose texture information but the resulting scene graph will be smaller. Defaults to False.
- force_collision_mesh (bool, optional): Same as force_mesh, but for collision scene. Defaults to True.
- build_tree (bool, optional): Build the tree structure for global kinematics computation
- """
- if filename_handler is None:
- self._filename_handler = partial(filename_handler_magic, dir=mesh_dir)
- else:
- self._filename_handler = filename_handler
-
- self.robot = robot
- self._create_maps()
- self._update_actuated_joints()
-
- self._cfg = self.zero_cfg
-
- if build_scene_graph or build_collision_scene_graph:
- self._base_link = self._determine_base_link()
- else:
- self._base_link = None
-
- self._errors = []
-
- if build_scene_graph:
- self._scene = self._create_scene(
- use_collision_geometry=False,
- load_geometry=load_meshes,
- force_mesh=force_mesh,
- force_single_geometry_per_link=force_mesh,
- )
- else:
- self._scene = None
-
- if build_collision_scene_graph:
- self._scene_collision = self._create_scene(
- use_collision_geometry=True,
- load_geometry=load_collision_meshes,
- force_mesh=force_collision_mesh,
- force_single_geometry_per_link=force_collision_mesh,
- )
- else:
- self._scene_collision = None
-
- if build_tree:
- self.tree_root = self.build_tree()
- else:
- self.tree_root = None
-
- @property
- def scene(self) -> trimesh.Scene:
- """A scene object representing the URDF model.
-
- Returns:
- trimesh.Scene: A trimesh scene object.
- """
- return self._scene
-
- @property
- def collision_scene(self) -> trimesh.Scene:
- """A scene object representing the elements of the URDF model
-
- Returns:
- trimesh.Scene: A trimesh scene object.
- """
- return self._scene_collision
-
- @property
- def link_map(self) -> dict:
- """A dictionary mapping link names to link objects.
-
- Returns:
- dict: Mapping from link name (str) to Link.
- """
- return self._link_map
-
- @property
- def joint_map(self) -> dict:
- """A dictionary mapping joint names to joint objects.
-
- Returns:
- dict: Mapping from joint name (str) to Joint.
- """
- return self._joint_map
-
- @property
- def joint_names(self):
- """List of joint names.
-
- Returns:
- list[str]: List of joint names of the URDF model.
- """
- return [j.name for j in self.robot.joints]
-
- @property
- def actuated_joints(self):
- """List of actuated joints. This excludes mimic and fixed joints.
-
- Returns:
- list[Joint]: List of actuated joints of the URDF model.
- """
- return self._actuated_joints
-
- @property
- def actuated_dof_indices(self):
- """List of DOF indices per actuated joint. Can be used to reference configuration.
-
- Returns:
- list[list[int]]: List of DOF indices per actuated joint.
- """
- return self._actuated_dof_indices
-
- @property
- def actuated_joint_indices(self):
- """List of indices of all joints that are actuated, i.e., not of type mimic or fixed.
-
- Returns:
- list[int]: List of indices of actuated joints.
- """
- return self._actuated_joint_indices
-
- @property
- def actuated_joint_names(self):
- """List of names of actuated joints. This excludes mimic and fixed joints.
-
- Returns:
- list[str]: List of names of actuated joints of the URDF model.
- """
- return [j.name for j in self._actuated_joints]
-
- @property
- def num_actuated_joints(self):
- """Number of actuated joints.
-
- Returns:
- int: Number of actuated joints.
- """
- return len(self.actuated_joints)
-
- @property
- def num_dofs(self):
- """Number of degrees of freedom of actuated joints. Depending on the type of the joint, the number of DOFs might vary.
-
- Returns:
- int: Degrees of freedom.
- """
- total_num_dofs = 0
- for j in self._actuated_joints:
- if j.type in ["revolute", "prismatic", "continuous"]:
- total_num_dofs += 1
- elif j.type == "floating":
- total_num_dofs += 6
- elif j.type == "planar":
- total_num_dofs += 2
- return total_num_dofs
-
- @property
- def zero_cfg(self):
- """Return the zero configuration.
-
- Returns:
- np.ndarray: The zero configuration.
- """
- return np.zeros(self.num_dofs)
-
- @property
- def center_cfg(self):
- """Return center configuration of URDF model by using the average of each joint's limits if present, otherwise zero.
-
- Returns:
- (n), float: Default configuration of URDF model.
- """
- config = []
- config_names = []
- for j in self._actuated_joints:
- if j.type == "revolute" or j.type == "prismatic":
- if j.limit is not None:
- cfg = [j.limit.lower + 0.5 * (j.limit.upper - j.limit.lower)]
- else:
- cfg = [0.0]
- elif j.type == "continuous":
- cfg = [0.0]
- elif j.type == "floating":
- cfg = [0.0] * 6
- elif j.type == "planar":
- cfg = [0.0] * 2
-
- config.append(cfg)
- config_names.append(j.name)
-
- for i, j in enumerate(self.robot.joints):
- if j.mimic is not None:
- index = config_names.index(j.mimic.joint)
- config[i][0] = config[index][0] * j.mimic.multiplier + j.mimic.offset
-
- if len(config) == 0:
- return np.array([], dtype=np.float64)
- return np.concatenate(config)
-
- @property
- def cfg(self):
- """Current configuration.
-
- Returns:
- np.ndarray: Current configuration of URDF model.
- """
- return self._cfg
-
- @property
- def base_link(self):
- """Name of URDF base/root link.
-
- Returns:
- str: Name of base link of URDF model.
- """
- return self._base_link
-
- @property
- def errors(self) -> list:
- """A list with validation errors.
-
- Returns:
- list: A list of validation errors.
- """
- return self._errors
-
- def clear_errors(self):
- """Clear the validation error log."""
- self._errors = []
-
- def show(self, collision_geometry=False, callback=None):
- """Open a simpler viewer displaying the URDF model.
-
- Args:
- collision_geometry (bool, optional): Whether to display the or elements. Defaults to False.
- """
- if collision_geometry:
- if self._scene_collision is None:
- raise ValueError(
- "No collision scene available. Use build_collision_scene_graph=True and load_collision_meshes=True during loading."
- )
- else:
- self._scene_collision.show(callback=callback)
- else:
- if self._scene is None:
- raise ValueError("No scene available. Use build_scene_graph=True and load_meshes=True during loading.")
- elif len(self._scene.bounds_corners) < 1:
- raise ValueError(
- "Scene is empty, maybe meshes failed to load? Use build_scene_graph=True and load_meshes=True during loading."
- )
- else:
- self._scene.show(callback=callback)
-
- def validate(self, validation_fn=None) -> bool:
- """Validate URDF model.
-
- Args:
- validation_fn (function, optional): A function f(list[yourdfpy.URDFError]) -> bool. None uses the strict handler (any error leads to False). Defaults to None.
-
- Returns:
- bool: Whether the model is valid.
- """
- self._errors = []
- self._validate_robot(self.robot)
-
- if validation_fn is None:
- validation_fn = validation_handler_strict
-
- return validation_fn(self._errors)
-
- def _create_maps(self):
- self._material_map = {}
- for m in self.robot.materials:
- self._material_map[m.name] = m
-
- self._joint_map = {}
- for j in self.robot.joints:
- self._joint_map[j.name] = j
-
- self._link_map = {}
- for l in self.robot.links:
- self._link_map[l.name] = l
-
- def _update_actuated_joints(self):
- self._actuated_joints = []
- self._actuated_joint_indices = []
- self._actuated_dof_indices = []
-
- dof_indices_cnt = 0
- for i, j in enumerate(self.robot.joints):
- if j.mimic is None and j.type != "fixed":
- self._actuated_joints.append(j)
- self._actuated_joint_indices.append(i)
-
- if j.type in ["prismatic", "revolute", "continuous"]:
- self._actuated_dof_indices.append([dof_indices_cnt])
- dof_indices_cnt += 1
- elif j.type == "floating":
- self._actuated_dof_indices.append([dof_indices_cnt, dof_indices_cnt + 1, dof_indices_cnt + 2])
- dof_indices_cnt += 3
- elif j.type == "planar":
- self._actuated_dof_indices.append([dof_indices_cnt, dof_indices_cnt + 1])
- dof_indices_cnt += 2
-
- def _validate_required_attribute(self, attribute, error_msg, allowed_values=None):
- if attribute is None:
- self._errors.append(URDFIncompleteError(error_msg))
- elif isinstance(attribute, str) and len(attribute) == 0:
- self._errors.append(URDFIncompleteError(error_msg))
-
- if allowed_values is not None and attribute is not None:
- if attribute not in allowed_values:
- self._errors.append(URDFAttributeValueError(error_msg))
-
- @staticmethod
- def load(fname_or_file, add_dummy_free_joints=False, **kwargs):
- """Load URDF file from filename or file object.
-
- Args:
- fname_or_file (str or file object): A filename or file object, file-like object, stream representing the URDF file.
- **build_scene_graph (bool, optional): Wheter to build a scene graph to enable transformation queries and forward kinematics. Defaults to True.
- **build_collision_scene_graph (bool, optional): Wheter to build a scene graph for elements. Defaults to False.
- **load_meshes (bool, optional): Whether to load the meshes referenced in the elements. Defaults to True.
- **load_collision_meshes (bool, optional): Whether to load the collision meshes referenced in the elements. Defaults to False.
- **filename_handler ([type], optional): Any function f(in: str) -> str, that maps filenames in the URDF to actual resources. Can be used to customize treatment of `package://` directives or relative/absolute filenames. Defaults to None.
- **mesh_dir (str, optional): A root directory used for loading meshes. Defaults to "".
- **force_mesh (bool, optional): Each loaded geometry will be concatenated into a single one (instead of being turned into a graph; in case the underlying file contains multiple geometries). This might loose texture information but the resulting scene graph will be smaller. Defaults to False.
- **force_collision_mesh (bool, optional): Same as force_mesh, but for collision scene. Defaults to True.
-
- Raises:
- ValueError: If filename does not exist.
-
- Returns:
- yourdfpy.URDF: URDF model.
- """
- if isinstance(fname_or_file, six.string_types):
- if not os.path.isfile(fname_or_file):
- raise ValueError("{} is not a file".format(fname_or_file))
-
- if not "mesh_dir" in kwargs:
- kwargs["mesh_dir"] = os.path.dirname(fname_or_file)
-
- try:
- parser = etree.XMLParser(remove_blank_text=True)
- tree = etree.parse(fname_or_file, parser=parser)
- xml_root = tree.getroot()
- except Exception as e:
- _logger.error(e)
- _logger.error("Using different parsing approach.")
-
- events = ("start", "end", "start-ns", "end-ns")
- xml = etree.iterparse(fname_or_file, recover=True, events=events)
-
- # Iterate through all XML elements
- for action, elem in xml:
- # Skip comments and processing instructions,
- # because they do not have names
- if not (isinstance(elem, etree._Comment) or isinstance(elem, etree._ProcessingInstruction)):
- # Remove a namespace URI in the element's name
- # elem.tag = etree.QName(elem).localname
- if action == "end" and ":" in elem.tag:
- elem.getparent().remove(elem)
-
- xml_root = xml.root
-
- # Remove comments
- etree.strip_tags(xml_root, etree.Comment)
- etree.cleanup_namespaces(xml_root)
-
- return URDF(
- robot=URDF._parse_robot(xml_element=xml_root, add_dummy_free_joints=add_dummy_free_joints), **kwargs
- )
-
- def contains(self, key, value, element=None) -> bool:
- """Checks recursively whether the URDF tree contains the provided key-value pair.
-
- Args:
- key (str): A key.
- value (str): A value.
- element (etree.Element, optional): The XML element from which to start the recursive search. None means URDF root. Defaults to None.
-
- Returns:
- bool: Whether the key-value pair was found.
- """
- if element is None:
- element = self.robot
-
- result = False
- for field in element.__dataclass_fields__:
- field_value = getattr(element, field)
- if is_dataclass(field_value):
- result = result or self.contains(key=key, value=value, element=field_value)
- elif isinstance(field_value, list) and len(field_value) > 0 and is_dataclass(field_value[0]):
- for field_value_element in field_value:
- result = result or self.contains(key=key, value=value, element=field_value_element)
- else:
- if key == field and value == field_value:
- result = True
- return result
-
- def _determine_base_link(self):
- """Get the base link of the URDF tree by extracting all links without parents.
- In case multiple links could be root chose the first.
-
- Returns:
- str: Name of the base link.
- """
- link_names = [l.name for l in self.robot.links]
-
- for j in self.robot.joints:
- link_names.remove(j.child)
-
- if len(link_names) == 0:
- # raise Error?
- return None
-
- return link_names[0]
-
- def _forward_kinematics_joint(self, joint, q=None):
- origin = np.eye(4) if joint.origin is None else joint.origin
-
- if joint.mimic is not None:
- if joint.mimic.joint in self.actuated_joint_names:
- mimic_joint_index = self.actuated_joint_names.index(joint.mimic.joint)
- q = self._cfg[mimic_joint_index] * joint.mimic.multiplier + joint.mimic.offset
- else:
- _logger.warning(
- f"Joint '{joint.name}' is supposed to mimic '{joint.mimic.joint}'. But this joint is not actuated - will assume (0.0 + offset)."
- )
- q = 0.0 + joint.mimic.offset
-
- if joint.type in ["revolute", "prismatic", "continuous"]:
- if q is None:
- # Use internal cfg vector for forward kinematics
- q = float(self.cfg[self.actuated_dof_indices[self.actuated_joint_names.index(joint.name)]])
-
- if joint.type == "prismatic":
- matrix = origin @ tra.translation_matrix(q * joint.axis)
- else:
- matrix = origin @ tra.rotation_matrix(q, joint.axis)
- else:
- # this includes: floating, planar, fixed
- matrix = origin
-
- return matrix, q
-
- def update_cfg(self, configuration):
- """Update joint configuration of URDF; does forward kinematics.
-
- Args:
- configuration (dict, list[float], tuple[float] or np.ndarray): A mapping from joints or joint names to configuration values, or a list containing a value for each actuated joint.
-
- Raises:
- ValueError: Raised if dimensionality of configuration does not match number of actuated joints of URDF model.
- TypeError: Raised if configuration is neither a dict, list, tuple or np.ndarray.
- """
- joint_cfg = []
-
- if isinstance(configuration, dict):
- for joint in configuration:
- if isinstance(joint, six.string_types):
- joint_cfg.append((self._joint_map[joint], configuration[joint]))
- elif isinstance(joint, Joint):
- # TODO: Joint is not hashable; so this branch will not succeed
- joint_cfg.append((joint, configuration[joint]))
- elif isinstance(configuration, (list, tuple, np.ndarray)):
- if len(configuration) == len(self.robot.joints):
- for joint, value in zip(self.robot.joints, configuration):
- joint_cfg.append((joint, value))
- elif len(configuration) == self.num_actuated_joints:
- for joint, value in zip(self._actuated_joints, configuration):
- joint_cfg.append((joint, value))
- else:
- raise ValueError(
- f"Dimensionality of configuration ({len(configuration)}) doesn't match number of all ({len(self.robot.joints)}) or actuated joints ({self.num_actuated_joints})."
- )
- else:
- raise TypeError("Invalid type for configuration")
-
- # append all mimic joints in the update
- for j, q in joint_cfg + [(j, 0.0) for j in self.robot.joints if j.mimic is not None]:
- matrix, joint_q = self._forward_kinematics_joint(j, q=q)
-
- # update internal configuration vector - only consider actuated joints
- if j.name in self.actuated_joint_names:
- self._cfg[self.actuated_dof_indices[self.actuated_joint_names.index(j.name)]] = joint_q
-
- if self._scene is not None:
- self._scene.graph.update(frame_from=j.parent, frame_to=j.child, matrix=matrix)
- if self._scene_collision is not None:
- self._scene_collision.graph.update(frame_from=j.parent, frame_to=j.child, matrix=matrix)
-
- def get_transform(self, frame_to, frame_from=None, collision_geometry=False):
- """Get the transform from one frame to another.
-
- Args:
- frame_to (str): Node name.
- frame_from (str, optional): Node name. If None it will be set to self.base_frame. Defaults to None.
- collision_geometry (bool, optional): Whether to use the collision geometry scene graph (instead of the visual geometry). Defaults to False.
-
- Raises:
- ValueError: Raised if scene graph wasn't constructed during intialization.
-
- Returns:
- (4, 4) float: Homogeneous transformation matrix
- """
- if collision_geometry:
- if self._scene_collision is None:
- raise ValueError("No collision scene available. Use build_collision_scene_graph=True during loading.")
- else:
- return self._scene_collision.graph.get(frame_to=frame_to, frame_from=frame_from)[0]
- else:
- if self._scene is None:
- raise ValueError("No scene available. Use build_scene_graph=True during loading.")
- else:
- return self._scene.graph.get(frame_to=frame_to, frame_from=frame_from)[0]
-
- def _link_mesh(self, link, collision_geometry=True):
- geometries = link.collisions if collision_geometry else link.visuals
-
- if len(geometries) == 0:
- return None
-
- meshes = []
- for g in geometries:
- for m in g.geometry.meshes:
- m = m.copy()
- pose = g.origin
- if g.geometry.mesh is not None:
- if g.geometry.mesh.scale is not None:
- S = np.eye(4)
- S[:3, :3] = np.diag(g.geometry.mesh.scale)
- pose = pose.dot(S)
- m.apply_transform(pose)
- meshes.append(m)
- if len(meshes) == 0:
- return None
- self._collision_mesh = meshes[0] + meshes[1:]
- return self._collision_mesh
-
- def _geometry2trimeshscene(self, geometry, load_file, force_mesh, skip_materials):
- new_s = None
- if geometry.box is not None:
- new_s = trimesh.primitives.Box(extents=geometry.box.size).scene()
- elif geometry.sphere is not None:
- new_s = trimesh.primitives.Sphere(radius=geometry.sphere.radius).scene()
- elif geometry.cylinder is not None:
- new_s = trimesh.primitives.Cylinder(
- radius=geometry.cylinder.radius, height=geometry.cylinder.length
- ).scene()
- elif geometry.mesh is not None and load_file:
- new_filename = self._filename_handler(fname=geometry.mesh.filename)
-
- if os.path.isfile(new_filename):
- _logger.debug(f"Loading {geometry.mesh.filename} as {new_filename}")
-
- if force_mesh:
- new_g = trimesh.load(
- new_filename,
- ignore_broken=True,
- force="mesh",
- skip_materials=skip_materials,
- )
-
- # add original filename
- if "file_path" not in new_g.metadata:
- new_g.metadata["file_path"] = os.path.abspath(new_filename)
- new_g.metadata["file_name"] = os.path.basename(new_filename)
-
- new_s = trimesh.Scene()
- new_s.add_geometry(new_g)
- else:
- new_s = trimesh.load(
- new_filename,
- ignore_broken=True,
- force="scene",
- skip_materials=skip_materials,
- )
-
- if "file_path" in new_s.metadata:
- for i, (_, geom) in enumerate(new_s.geometry.items()):
- if "file_path" not in geom.metadata:
- geom.metadata["file_path"] = new_s.metadata["file_path"]
- geom.metadata["file_name"] = new_s.metadata["file_name"]
- geom.metadata["file_element"] = i
-
- # scale mesh appropriately
- if geometry.mesh.scale is not None:
- if isinstance(geometry.mesh.scale, float):
- new_s = new_s.scaled(geometry.mesh.scale)
- elif isinstance(geometry.mesh.scale, np.ndarray):
- new_s = new_s.scaled(geometry.mesh.scale)
- else:
- _logger.warning(f"Warning: Can't interpret scale '{geometry.mesh.scale}'")
- else:
- _logger.warning(f"Can't find {new_filename}")
- return new_s
-
- def _add_geometries_to_scene(
- self,
- s,
- geometries,
- link_name,
- load_geometry,
- force_mesh,
- force_single_geometry,
- skip_materials,
- ):
- if force_single_geometry:
- tmp_scene = trimesh.Scene(base_frame=link_name)
-
- first_geom_name = None
-
- for v in geometries:
- if v.geometry is not None:
- if first_geom_name is None:
- first_geom_name = v.name
-
- new_s = self._geometry2trimeshscene(
- geometry=v.geometry,
- load_file=load_geometry,
- force_mesh=force_mesh,
- skip_materials=skip_materials,
- )
- if new_s is not None:
- origin = v.origin if v.origin is not None else np.eye(4)
-
- if force_single_geometry:
- for name, geom in new_s.geometry.items():
- if isinstance(v, Visual):
- apply_visual_color(geom, v, self._material_map)
- tmp_scene.add_geometry(
- geometry=geom,
- geom_name=v.name,
- parent_node_name=link_name,
- transform=origin @ new_s.graph.get(name)[0],
- )
- else:
- # The following map is used to deal with glb format
- # when the graph node and geometry have different names
- geom_name_map = {new_s.graph[node_name][1]: node_name for node_name in new_s.graph.nodes}
- for name, geom in new_s.geometry.items():
- if isinstance(v, Visual):
- apply_visual_color(geom, v, self._material_map)
- s.add_geometry(
- geometry=geom,
- geom_name=v.name,
- parent_node_name=link_name,
- transform=origin @ new_s.graph.get(geom_name_map[name])[0],
- )
-
- if force_single_geometry and len(tmp_scene.geometry) > 0:
- s.add_geometry(
- geometry=tmp_scene.dump(concatenate=True),
- geom_name=first_geom_name,
- parent_node_name=link_name,
- transform=np.eye(4),
- )
-
- def _create_scene(
- self,
- use_collision_geometry=False,
- load_geometry=True,
- force_mesh=False,
- force_single_geometry_per_link=False,
- ):
- s = trimesh.scene.Scene(base_frame=self._base_link)
-
- for j in self.robot.joints:
- matrix, _ = self._forward_kinematics_joint(j)
-
- s.graph.update(frame_from=j.parent, frame_to=j.child, matrix=matrix)
-
- for l in self.robot.links:
- if l.name not in s.graph.nodes and l.name != s.graph.base_frame:
- _logger.warning(f"{l.name} not connected via joints. Will add link to base frame.")
- s.graph.update(frame_from=s.graph.base_frame, frame_to=l.name)
-
- meshes = l.collisions if use_collision_geometry else l.visuals
- self._add_geometries_to_scene(
- s,
- geometries=meshes,
- link_name=l.name,
- load_geometry=load_geometry,
- force_mesh=force_mesh,
- force_single_geometry=force_single_geometry_per_link,
- skip_materials=use_collision_geometry,
- )
-
- return s
-
- def _successors(self, node):
- """
- Get all nodes of the scene that succeeds a specified node.
-
- Parameters
- ------------
- node : any
- Hashable key in `scene.graph`
-
- Returns
- -----------
- subnodes : set[str]
- Set of nodes.
- """
- # get every node that is a successor to specified node
- # this includes `node`
- return self._scene.graph.transforms.successors(node)
-
- def _create_subrobot(self, robot_name, root_link_name):
- subrobot = Robot(name=robot_name)
- subnodes = self._successors(node=root_link_name)
-
- if len(subnodes) > 0:
- for node in subnodes:
- if node in self.link_map:
- subrobot.links.append(copy.deepcopy(self.link_map[node]))
- for joint_name, joint in self.joint_map.items():
- if joint.parent in subnodes and joint.child in subnodes:
- subrobot.joints.append(copy.deepcopy(self.joint_map[joint_name]))
-
- return subrobot
-
- def split_along_joints(self, joint_type="floating", **kwargs):
- """Split URDF model along a particular joint type.
- The result is a set of URDF models which together compose the original URDF.
-
- Args:
- joint_type (str, or list[str], optional): Type of joint to use for splitting. Defaults to "floating".
- **kwargs: Arguments delegated to URDF constructor of new URDF models.
-
- Returns:
- list[(np.ndarray, yourdfpy.URDF)]: A list of tuples (np.ndarray, yourdfpy.URDF) whereas each homogeneous 4x4 matrix describes the root transformation of the respective URDF model w.r.t. the original URDF.
- """
- root_urdf = URDF(robot=copy.deepcopy(self.robot), build_scene_graph=False, load_meshes=False)
- result = []
-
- joint_types = joint_type if isinstance(joint_type, list) else [joint_type]
-
- # find all relevant joints
- joint_names = [j.name for j in self.robot.joints if j.type in joint_types]
- for joint_name in joint_names:
- root_link = self.link_map[self.joint_map[joint_name].child]
- new_robot = self._create_subrobot(
- robot_name=root_link.name,
- root_link_name=root_link.name,
- )
-
- result.append(
- (
- self._scene.graph.get(root_link.name)[0],
- URDF(robot=new_robot, **kwargs),
- )
- )
-
- # remove links and joints from root robot
- for j in new_robot.joints:
- root_urdf.robot.joints.remove(root_urdf.joint_map[j.name])
- for l in new_robot.links:
- root_urdf.robot.links.remove(root_urdf.link_map[l.name])
-
- # remove joint that connects root urdf to root_link
- if root_link.name in [j.child for j in root_urdf.robot.joints]:
- root_urdf.robot.joints.remove(
- root_urdf.robot.joints[[j.child for j in root_urdf.robot.joints].index(root_link.name)]
- )
-
- result.insert(0, (np.eye(4), URDF(robot=root_urdf.robot, **kwargs)))
-
- return result
-
- def validate_filenames(self):
- for l in self.robot.links:
- meshes = [m.geometry.mesh for m in l.collisions + l.visuals if m.geometry.mesh is not None]
- for m in meshes:
- _logger.debug(m.filename, "-->", self._filename_handler(m.filename))
- if not os.path.isfile(self._filename_handler(m.filename)):
- return False
- return True
-
- def write_xml(self):
- """Write URDF model to an XML element hierarchy.
-
- Returns:
- etree.ElementTree: XML data.
- """
- xml_element = self._write_robot(self.robot)
- return etree.ElementTree(xml_element)
-
- def write_xml_string(self, **kwargs):
- """Write URDF model to a string.
-
- Returns:
- str: String of the xml representation of the URDF model.
- """
- xml_element = self.write_xml()
- return etree.tostring(xml_element, xml_declaration=True, *kwargs)
-
- def write_xml_file(self, fname):
- """Write URDF model to an xml file.
-
- Args:
- fname (str): Filename of the file to be written. Usually ends in `.urdf`.
- """
- xml_element = self.write_xml()
- xml_element.write(fname, xml_declaration=True, pretty_print=True)
-
- def _parse_mimic(xml_element):
- if xml_element is None:
- return None
-
- return Mimic(
- joint=xml_element.get("joint"),
- multiplier=_str2float(xml_element.get("multiplier", 1.0)),
- offset=_str2float(xml_element.get("offset", 0.0)),
- )
-
- def _write_mimic(self, xml_parent, mimic):
- etree.SubElement(
- xml_parent,
- "mimic",
- attrib={
- "joint": mimic.joint,
- "multiplier": str(mimic.multiplier),
- "offset": str(mimic.offset),
- },
- )
-
- def _parse_safety_controller(xml_element):
- if xml_element is None:
- return None
-
- return SafetyController(
- soft_lower_limit=_str2float(xml_element.get("soft_lower_limit")),
- soft_upper_limit=_str2float(xml_element.get("soft_upper_limit")),
- k_position=_str2float(xml_element.get("k_position")),
- k_velocity=_str2float(xml_element.get("k_velocity")),
- )
-
- def _write_safety_controller(self, xml_parent, safety_controller):
- etree.SubElement(
- xml_parent,
- "safety_controller",
- attrib={
- "soft_lower_limit": str(safety_controller.soft_lower_limit),
- "soft_upper_limit": str(safety_controller.soft_upper_limit),
- "k_position": str(safety_controller.k_position),
- "k_velocity": str(safety_controller.k_velocity),
- },
- )
-
- def _parse_transmission_joint(xml_element):
- if xml_element is None:
- return None
-
- transmission_joint = TransmissionJoint(name=xml_element.get("name"))
-
- for h in xml_element.findall("hardware_interface"):
- transmission_joint.hardware_interfaces.append(h.text)
-
- return transmission_joint
-
- def _write_transmission_joint(self, xml_parent, transmission_joint):
- xml_element = etree.SubElement(
- xml_parent,
- "joint",
- attrib={
- "name": str(transmission_joint.name),
- },
- )
- for h in transmission_joint.hardware_interfaces:
- tmp = etree.SubElement(
- xml_element,
- "hardwareInterface",
- )
- tmp.text = h
-
- def _parse_actuator(xml_element):
- if xml_element is None:
- return None
-
- actuator = Actuator(name=xml_element.get("name"))
- if xml_element.find("mechanicalReduction"):
- actuator.mechanical_reduction = float(xml_element.find("mechanicalReduction").text)
-
- for h in xml_element.findall("hardwareInterface"):
- actuator.hardware_interfaces.append(h.text)
-
- return actuator
-
- def _write_actuator(self, xml_parent, actuator):
- xml_element = etree.SubElement(
- xml_parent,
- "actuator",
- attrib={
- "name": str(actuator.name),
- },
- )
- if actuator.mechanical_reduction is not None:
- tmp = etree.SubElement("mechanicalReduction")
- tmp.text = str(actuator.mechanical_reduction)
-
- for h in actuator.hardware_interfaces:
- tmp = etree.SubElement(
- xml_element,
- "hardwareInterface",
- )
- tmp.text = h
-
- def _parse_transmission(xml_element):
- if xml_element is None:
- return None
-
- transmission = Transmission(name=xml_element.get("name"))
-
- for j in xml_element.findall("joint"):
- transmission.joints.append(URDF._parse_transmission_joint(j))
- for a in xml_element.findall("actuator"):
- transmission.actuators.append(URDF._parse_actuator(a))
-
- return transmission
-
- def _write_transmission(self, xml_parent, transmission):
- xml_element = etree.SubElement(
- xml_parent,
- "transmission",
- attrib={
- "name": str(transmission.name),
- },
- )
-
- for j in transmission.joints:
- self._write_transmission_joint(xml_element, j)
-
- for a in transmission.actuators:
- self._write_actuator(xml_element, a)
-
- def _parse_calibration(xml_element):
- if xml_element is None:
- return None
-
- return Calibration(
- rising=_str2float(xml_element.get("rising")),
- falling=_str2float(xml_element.get("falling")),
- )
-
- def _write_calibration(self, xml_parent, calibration):
- etree.SubElement(
- xml_parent,
- "calibration",
- attrib={
- "rising": str(calibration.rising),
- "falling": str(calibration.falling),
- },
- )
-
- def _parse_box(xml_element):
- return Box(size=np.array(xml_element.attrib["size"].split(), dtype=float))
-
- def _write_box(self, xml_parent, box):
- etree.SubElement(xml_parent, "box", attrib={"size": " ".join(map(str, box.size))})
-
- def _parse_cylinder(xml_element):
- return Cylinder(
- radius=float(xml_element.attrib["radius"]),
- length=float(xml_element.attrib["length"]),
- )
-
- def _write_cylinder(self, xml_parent, cylinder):
- etree.SubElement(
- xml_parent,
- "cylinder",
- attrib={"radius": str(cylinder.radius), "length": str(cylinder.length)},
- )
-
- def _parse_sphere(xml_element):
- return Sphere(radius=float(xml_element.attrib["radius"]))
-
- def _write_sphere(self, xml_parent, sphere):
- etree.SubElement(xml_parent, "sphere", attrib={"radius": str(sphere.radius)})
-
- def _parse_scale(xml_element):
- if "scale" in xml_element.attrib:
- s = xml_element.get("scale").split()
- if len(s) == 0:
- return None
- elif len(s) == 1:
- return float(s[0])
- else:
- return np.array(list(map(float, s)))
- return None
-
- def _write_scale(self, xml_parent, scale):
- if scale is not None:
- if isinstance(scale, float) or isinstance(scale, int):
- xml_parent.set("scale", " ".join([str(scale)] * 3))
- else:
- xml_parent.set("scale", " ".join(map(str, scale)))
-
- def _parse_mesh(xml_element):
- return Mesh(filename=xml_element.get("filename"), scale=URDF._parse_scale(xml_element))
-
- def _write_mesh(self, xml_parent, mesh):
- # TODO: turn into different filename handler
- xml_element = etree.SubElement(
- xml_parent,
- "mesh",
- attrib={"filename": self._filename_handler(mesh.filename)},
- )
-
- self._write_scale(xml_element, mesh.scale)
-
- def _parse_geometry(xml_element):
- geometry = Geometry()
- if xml_element[0].tag == "box":
- geometry.box = URDF._parse_box(xml_element[0])
- elif xml_element[0].tag == "cylinder":
- geometry.cylinder = URDF._parse_cylinder(xml_element[0])
- elif xml_element[0].tag == "sphere":
- geometry.sphere = URDF._parse_sphere(xml_element[0])
- elif xml_element[0].tag == "mesh":
- geometry.mesh = URDF._parse_mesh(xml_element[0])
- else:
- raise ValueError(f"Unknown tag: {xml_element[0].tag}")
-
- return geometry
-
- def _validate_geometry(self, geometry):
- if geometry is None:
- self._errors.append(URDFIncompleteError(" is missing."))
-
- num_nones = sum(
- [
- x is not None
- for x in [
- geometry.box,
- geometry.cylinder,
- geometry.sphere,
- geometry.mesh,
- ]
- ]
- )
- if num_nones < 1:
- self._errors.append(
- URDFIncompleteError(
- "One of , , , needs to be defined as a child of ."
- )
- )
- elif num_nones > 1:
- self._errors.append(
- URDFError(
- "Too many of , , , defined as a child of . Only one allowed."
- )
- )
-
- def _write_geometry(self, xml_parent, geometry):
- if geometry is None:
- return
-
- xml_element = etree.SubElement(xml_parent, "geometry")
- if geometry.box is not None:
- self._write_box(xml_element, geometry.box)
- elif geometry.cylinder is not None:
- self._write_cylinder(xml_element, geometry.cylinder)
- elif geometry.sphere is not None:
- self._write_sphere(xml_element, geometry.sphere)
- elif geometry.mesh is not None:
- self._write_mesh(xml_element, geometry.mesh)
-
- def _parse_origin(xml_element):
- if xml_element is None:
- return None
-
- xyz = xml_element.get("xyz", default="0 0 0")
- rpy = xml_element.get("rpy", default="0 0 0")
-
- return tra.compose_matrix(
- translate=np.array(list(map(float, xyz.split()))),
- angles=np.array(list(map(float, rpy.split()))),
- )
-
- def _write_origin(self, xml_parent, origin):
- if origin is None:
- return
-
- etree.SubElement(
- xml_parent,
- "origin",
- attrib={
- "xyz": " ".join(map(str, tra.translation_from_matrix(origin))),
- "rpy": " ".join(map(str, tra.euler_from_matrix(origin))),
- },
- )
-
- def _parse_color(xml_element):
- if xml_element is None:
- return None
-
- rgba = xml_element.get("rgba", default="1 1 1 1")
-
- return Color(rgba=np.array(list(map(float, rgba.split()))))
-
- def _write_color(self, xml_parent, color):
- if color is None:
- return
-
- etree.SubElement(xml_parent, "color", attrib={"rgba": " ".join(map(str, color.rgba))})
-
- def _parse_texture(xml_element):
- if xml_element is None:
- return None
-
- # TODO: use texture filename handler
- return Texture(filename=xml_element.get("filename", default=None))
-
- def _write_texture(self, xml_parent, texture):
- if texture is None:
- return
-
- # TODO: use texture filename handler
- etree.SubElement(xml_parent, "texture", attrib={"filename": texture.filename})
-
- def _parse_material(xml_element):
- if xml_element is None:
- return None
-
- material = Material(name=xml_element.get("name"))
- material.color = URDF._parse_color(xml_element.find("color"))
- material.texture = URDF._parse_texture(xml_element.find("texture"))
-
- return material
-
- def _write_material(self, xml_parent, material):
- if material is None:
- return
-
- attrib = {"name": material.name} if material.name is not None else {}
- xml_element = etree.SubElement(
- xml_parent,
- "material",
- attrib=attrib,
- )
-
- self._write_color(xml_element, material.color)
- self._write_texture(xml_element, material.texture)
-
- def _parse_visual(xml_element):
- visual = Visual(name=xml_element.get("name"))
-
- visual.geometry = URDF._parse_geometry(xml_element.find("geometry"))
- visual.origin = URDF._parse_origin(xml_element.find("origin"))
- visual.material = URDF._parse_material(xml_element.find("material"))
-
- return visual
-
- def _validate_visual(self, visual):
- self._validate_geometry(visual.geometry)
-
- def _write_visual(self, xml_parent, visual):
- attrib = {"name": visual.name} if visual.name is not None else {}
- xml_element = etree.SubElement(
- xml_parent,
- "visual",
- attrib=attrib,
- )
-
- self._write_geometry(xml_element, visual.geometry)
- self._write_origin(xml_element, visual.origin)
- self._write_material(xml_element, visual.material)
-
- def _parse_collision(xml_element):
- collision = Collision(name=xml_element.get("name"))
-
- collision.geometry = URDF._parse_geometry(xml_element.find("geometry"))
- collision.origin = URDF._parse_origin(xml_element.find("origin"))
-
- return collision
-
- def _validate_collision(self, collision):
- self._validate_geometry(collision.geometry)
-
- def _write_collision(self, xml_parent, collision):
- attrib = {"name": collision.name} if collision.name is not None else {}
- xml_element = etree.SubElement(
- xml_parent,
- "collision",
- attrib=attrib,
- )
-
- self._write_geometry(xml_element, collision.geometry)
- self._write_origin(xml_element, collision.origin)
-
- def _parse_inertia(xml_element):
- if xml_element is None:
- return None
-
- x = xml_element
-
- return np.array(
- [
- [
- x.get("ixx", default=1.0),
- x.get("ixy", default=0.0),
- x.get("ixz", default=0.0),
- ],
- [
- x.get("ixy", default=0.0),
- x.get("iyy", default=1.0),
- x.get("iyz", default=0.0),
- ],
- [
- x.get("ixz", default=0.0),
- x.get("iyz", default=0.0),
- x.get("izz", default=1.0),
- ],
- ],
- dtype=np.float64,
- )
-
- def _write_inertia(self, xml_parent, inertia):
- if inertia is None:
- return None
-
- etree.SubElement(
- xml_parent,
- "inertia",
- attrib={
- "ixx": str(inertia[0, 0]),
- "ixy": str(inertia[0, 1]),
- "ixz": str(inertia[0, 2]),
- "iyy": str(inertia[1, 1]),
- "iyz": str(inertia[1, 2]),
- "izz": str(inertia[2, 2]),
- },
- )
-
- def _parse_mass(xml_element):
- if xml_element is None:
- return None
-
- return _str2float(xml_element.get("value", default=0.0))
-
- def _write_mass(self, xml_parent, mass):
- if mass is None:
- return
-
- etree.SubElement(
- xml_parent,
- "mass",
- attrib={
- "value": str(mass),
- },
- )
-
- def _parse_inertial(xml_element):
- if xml_element is None:
- return None
-
- inertial = Inertial()
- inertial.origin = URDF._parse_origin(xml_element.find("origin"))
- inertial.inertia = URDF._parse_inertia(xml_element.find("inertia"))
- inertial.mass = URDF._parse_mass(xml_element.find("mass"))
-
- return inertial
-
- def _write_inertial(self, xml_parent, inertial):
- if inertial is None:
- return
-
- xml_element = etree.SubElement(xml_parent, "inertial")
-
- self._write_origin(xml_element, inertial.origin)
- self._write_mass(xml_element, inertial.mass)
- self._write_inertia(xml_element, inertial.inertia)
-
- def _parse_link(xml_element):
- link = Link(name=xml_element.attrib["name"])
-
- link.inertial = URDF._parse_inertial(xml_element.find("inertial"))
-
- for v in xml_element.findall("visual"):
- link.visuals.append(URDF._parse_visual(v))
-
- for c in xml_element.findall("collision"):
- link.collisions.append(URDF._parse_collision(c))
-
- return link
-
- def _validate_link(self, link):
- self._validate_required_attribute(attribute=link.name, error_msg="The tag misses a 'name' attribute.")
-
- for v in link.visuals:
- self._validate_visual(v)
-
- for c in link.collisions:
- self._validate_collision(c)
-
- def _write_link(self, xml_parent, link):
- xml_element = etree.SubElement(
- xml_parent,
- "link",
- attrib={
- "name": link.name,
- },
- )
-
- self._write_inertial(xml_element, link.inertial)
- for visual in link.visuals:
- self._write_visual(xml_element, visual)
- for collision in link.collisions:
- self._write_collision(xml_element, collision)
-
- def _parse_axis(xml_element):
- if xml_element is None:
- return np.array([1.0, 0, 0])
-
- xyz = xml_element.get("xyz", "1 0 0")
- results = []
- for x in xyz.split():
- try:
- x = float(x)
- except ValueError:
- x = 0
- results.append(x)
- return np.array(results)
- # return np.array(list(map(float, xyz.split())))
-
- def _write_axis(self, xml_parent, axis):
- if axis is None:
- return
-
- etree.SubElement(xml_parent, "axis", attrib={"xyz": " ".join(map(str, axis))})
-
- def _parse_limit(xml_element):
- if xml_element is None:
- return None
-
- return Limit(
- effort=_str2float(xml_element.get("effort", default=None)),
- velocity=_str2float(xml_element.get("velocity", default=None)),
- lower=_str2float(xml_element.get("lower", default=None)),
- upper=_str2float(xml_element.get("upper", default=None)),
- )
-
- def _validate_limit(self, limit, type):
- if type in ["revolute", "prismatic"]:
- self._validate_required_attribute(
- limit,
- error_msg="The of a (prismatic, revolute) joint is missing.",
- )
-
- if limit is not None:
- self._validate_required_attribute(
- limit.upper,
- error_msg="Tag of joint is missing attribute 'upper'.",
- )
- self._validate_required_attribute(
- limit.lower,
- error_msg="Tag of joint is missing attribute 'lower'.",
- )
-
- if limit is not None:
- self._validate_required_attribute(
- limit.effort,
- error_msg="Tag of joint is missing attribute 'effort'.",
- )
-
- self._validate_required_attribute(
- limit.velocity,
- error_msg="Tag of joint is missing attribute 'velocity'.",
- )
-
- def _write_limit(self, xml_parent, limit):
- if limit is None:
- return
-
- attrib = {}
- if limit.effort is not None:
- attrib["effort"] = str(limit.effort)
- if limit.velocity is not None:
- attrib["velocity"] = str(limit.velocity)
- if limit.lower is not None:
- attrib["lower"] = str(limit.lower)
- if limit.upper is not None:
- attrib["upper"] = str(limit.upper)
-
- etree.SubElement(
- xml_parent,
- "limit",
- attrib=attrib,
- )
-
- def _parse_dynamics(xml_element):
- if xml_element is None:
- return None
-
- dynamics = Dynamics()
- dynamics.damping = xml_element.get("damping", default=None)
- dynamics.friction = xml_element.get("friction", default=None)
-
- return dynamics
-
- def _write_dynamics(self, xml_parent, dynamics):
- if dynamics is None:
- return
-
- attrib = {}
- if dynamics.damping is not None:
- attrib["damping"] = str(dynamics.damping)
- if dynamics.friction is not None:
- attrib["friction"] = str(dynamics.friction)
-
- etree.SubElement(
- xml_parent,
- "dynamics",
- attrib=attrib,
- )
-
- def _parse_joint(xml_element):
- joint = Joint(name=xml_element.attrib["name"])
-
- joint.type = xml_element.get("type", default=None)
- joint.parent = xml_element.find("parent").get("link")
- joint.child = xml_element.find("child").get("link")
- joint.origin = URDF._parse_origin(xml_element.find("origin"))
- joint.axis = URDF._parse_axis(xml_element.find("axis"))
- joint.limit = URDF._parse_limit(xml_element.find("limit"))
- joint.dynamics = URDF._parse_dynamics(xml_element.find("dynamics"))
- joint.mimic = URDF._parse_mimic(xml_element.find("mimic"))
- joint.calibration = URDF._parse_calibration(xml_element.find("calibration"))
- joint.safety_controller = URDF._parse_safety_controller(xml_element.find("safety_controller"))
-
- return joint
-
- def _validate_joint(self, joint):
- self._validate_required_attribute(
- attribute=joint.name,
- error_msg="The tag misses a 'name' attribute.",
- )
-
- allowed_types = [
- "revolute",
- "continuous",
- "prismatic",
- "fixed",
- "floating",
- "planar",
- ]
- self._validate_required_attribute(
- attribute=joint.type,
- error_msg=f"The tag misses a 'type' attribute or value is not part of allowed values [{', '.join(allowed_types)}].",
- allowed_values=allowed_types,
- )
-
- self._validate_required_attribute(
- joint.parent,
- error_msg=f"The of a is missing.",
- )
-
- self._validate_required_attribute(
- joint.child,
- error_msg=f"The of a is missing.",
- )
-
- self._validate_limit(joint.limit, type=joint.type)
-
- def _write_joint(self, xml_parent, joint):
- xml_element = etree.SubElement(
- xml_parent,
- "joint",
- attrib={
- "name": joint.name,
- "type": joint.type,
- },
- )
-
- etree.SubElement(xml_element, "parent", attrib={"link": joint.parent})
- etree.SubElement(xml_element, "child", attrib={"link": joint.child})
- self._write_origin(xml_element, joint.origin)
- self._write_axis(xml_element, joint.axis)
- self._write_limit(xml_element, joint.limit)
- self._write_dynamics(xml_element, joint.dynamics)
-
- @staticmethod
- def _parse_robot(xml_element, add_dummy_free_joints=False):
- robot = Robot(name=xml_element.attrib["name"])
-
- for l in xml_element.findall("link"):
- robot.links.append(URDF._parse_link(l))
- for j in xml_element.findall("joint"):
- robot.joints.append(URDF._parse_joint(j))
- for m in xml_element.findall("material"):
- robot.materials.append(URDF._parse_material(m))
-
- if add_dummy_free_joints:
- # Determine root link
- link_names = [l.name for l in robot.links]
- for j in robot.joints:
- link_names.remove(j.child)
-
- if len(link_names) == 0:
- raise RuntimeError(f"No root link found for robot.")
-
- root_link_name = link_names[0]
- _add_dummy_joints(robot, root_link_name)
-
- return robot
-
- def _validate_robot(self, robot):
- if robot is not None:
- self._validate_required_attribute(
- attribute=robot.name,
- error_msg="The tag misses a 'name' attribute.",
- )
-
- for l in robot.links:
- self._validate_link(l)
-
- for j in robot.joints:
- self._validate_joint(j)
-
- def _write_robot(self, robot):
- xml_element = etree.Element("robot", attrib={"name": robot.name})
- for link in robot.links:
- self._write_link(xml_element, link)
- for joint in robot.joints:
- self._write_joint(xml_element, joint)
- for material in robot.materials:
- self._write_material(xml_element, material)
-
- return xml_element
-
- def __eq__(self, other):
- if not isinstance(other, URDF):
- raise NotImplemented
- return self.robot == other.robot
-
- @property
- def filename_handler(self):
- return self._filename_handler
-
- def build_tree(self):
- parent_child_map: Dict[str, List[str]] = {}
- for joint in self.robot.joints:
- if joint.parent in parent_child_map:
- parent_child_map[joint.parent].append(joint.child)
- else:
- parent_child_map[joint.parent] = [joint.child]
-
- # Sort link with bfs order
- bfs_link_list = [self.base_link]
- to_be_handle_list = [self.base_link]
- while len(to_be_handle_list) > 0:
- parent = to_be_handle_list.pop(0)
- if parent not in parent_child_map:
- continue
-
- children = parent_child_map[parent]
- to_be_handle_list.extend(children)
- bfs_link_list.extend(children)
- bfs_joint_list = []
- for link_name in bfs_link_list[1:]:
- joint_index = [i for i in range(len(self.robot.joints)) if self.robot.joints[i].child == link_name][0]
- bfs_joint_list.append(self.robot.joints[joint_index])
-
- # Build tree
- root = Node(self.base_link, matrix=np.eye(4))
- for joint in bfs_joint_list:
- matrix, _ = self._forward_kinematics_joint(joint, 0)
- parent_node = anytree.search.findall_by_attr(root, value=joint.parent)[0]
- node = Node(joint.child, parent=parent_node, matrix=matrix)
- return root
-
- def update_kinematics(self, configuration):
- joint_cfg = []
-
- if isinstance(configuration, dict):
- for joint in configuration:
- if isinstance(joint, six.string_types):
- joint_cfg.append((self._joint_map[joint], configuration[joint]))
- elif isinstance(joint, Joint):
- # TODO: Joint is not hashable; so this branch will not succeed
- joint_cfg.append((joint, configuration[joint]))
- elif isinstance(configuration, (list, tuple, np.ndarray)):
- if len(configuration) == len(self.robot.joints):
- for joint, value in zip(self.robot.joints, configuration):
- joint_cfg.append((joint, value))
- elif len(configuration) == self.num_actuated_joints:
- for joint, value in zip(self._actuated_joints, configuration):
- joint_cfg.append((joint, value))
- else:
- raise ValueError(
- f"Dimensionality of configuration ({len(configuration)}) doesn't match number of all ({len(self.robot.joints)}) or actuated joints ({self.num_actuated_joints})."
- )
- else:
- raise TypeError("Invalid type for configuration")
-
- # append all mimic joints in the update
- for j, q in joint_cfg + [(j, 0.0) for j in self.robot.joints if j.mimic is not None]:
- matrix, _ = self._forward_kinematics_joint(j, q=q)
- node = anytree.search.findall_by_attr(self.tree_root, j.child)[0]
- node.matrix = matrix
-
- for node in LevelOrderIter(self.tree_root):
- if node.name == self.base_link:
- node.global_pose = np.eye(4)
- else:
- node.global_pose = node.parent.global_pose @ node.matrix
-
- def get_link_global_transform(self, link_name):
- node = anytree.search.findall_by_attr(self.tree_root, link_name)[0]
-
- return node.global_pose
-
-
-def _add_dummy_joints(robot: Robot, root_link_name: str):
- # Prepare link and joint properties
- translation_range = (-5, 5)
- rotation_range = (-2 * np.pi, 2 * np.pi)
- joint_types = ["prismatic"] * 3 + ["revolute"] * 3
- joint_limit = [translation_range] * 3 + [rotation_range] * 3
- joint_name = DUMMY_JOINT_NAMES.copy()
- link_name = [f"dummy_{name}_translation_link" for name in "xyz"] + [f"dummy_{name}_rotation_link" for name in "xyz"]
-
- links = []
- joints = []
-
- for i in range(6):
- inertial = Inertial(
- mass=0.01, inertia=np.array([[1e-4, 0, 0], [0, 1e-4, 0], [0, 0, 1e-4]]), origin=np.identity(4)
- )
- link = Link(name=link_name[i], inertial=inertial)
- links.append(link)
-
- joint_axis = np.zeros(3, dtype=int)
- joint_axis[i % 3] = 1
- limit = Limit(lower=joint_limit[i][0], upper=joint_limit[i][1], velocity=3.14, effort=10)
-
- child_name = link_name[i + 1] if i < 5 else root_link_name
- joint = Joint(
- name=joint_name[i],
- type=joint_types[i],
- parent=link_name[i],
- child=child_name,
- origin=np.identity(4),
- axis=joint_axis,
- limit=limit,
- )
- joints.append(joint)
-
- robot.joints = joints + robot.joints
- robot.links = links + robot.links
-
-
-DUMMY_JOINT_NAMES = [f"dummy_{name}_translation_joint" for name in "xyz"] + [
- f"dummy_{name}_rotation_joint" for name in "xyz"
-]
diff --git a/teleop/robot_control/hand_retargeting.py b/teleop/robot_control/hand_retargeting.py
index 1c67945..27f1d40 100644
--- a/teleop/robot_control/hand_retargeting.py
+++ b/teleop/robot_control/hand_retargeting.py
@@ -1,4 +1,4 @@
-from .dex_retargeting.retargeting_config import RetargetingConfig
+from dex_retargeting import RetargetingConfig
from pathlib import Path
import yaml
from enum import Enum
diff --git a/teleop/robot_control/robot_hand_unitree.py b/teleop/robot_control/robot_hand_unitree.py
index fb04cf1..3061851 100644
--- a/teleop/robot_control/robot_hand_unitree.py
+++ b/teleop/robot_control/robot_hand_unitree.py
@@ -396,7 +396,7 @@ class Gripper_JointIndex(IntEnum):
if __name__ == "__main__":
import argparse
- from teleop.open_television.tv_wrapper import TeleVisionWrapper
+ from televuer import TeleVuerWrapper
from teleop.image_server.image_client import ImageClient
parser = argparse.ArgumentParser()
@@ -432,7 +432,7 @@ if __name__ == "__main__":
image_receive_thread.start()
# television and arm
- tv_wrapper = TeleVisionWrapper(BINOCULAR, tv_img_shape, img_shm.name)
+ tv_wrapper = TeleVuerWrapper(BINOCULAR, tv_img_shape, img_shm.name)
if args.dex:
left_hand_array = Array('d', 75, lock=True)
diff --git a/teleop/teleop_hand_and_arm.py b/teleop/teleop_hand_and_arm.py
index ac9f94a..5ed2c6a 100644
--- a/teleop/teleop_hand_and_arm.py
+++ b/teleop/teleop_hand_and_arm.py
@@ -14,7 +14,7 @@ current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)
-from teleop.open_television import TeleVisionWrapper
+from televuer import TeleVuerWrapper
from teleop.robot_control.robot_arm import G1_29_ArmController, G1_23_ArmController, H1_2_ArmController, H1_ArmController
from teleop.robot_control.robot_arm_ik import G1_29_ArmIK, G1_23_ArmIK, H1_2_ArmIK, H1_ArmIK
from teleop.robot_control.robot_hand_unitree import Dex3_1_Controller, Gripper_Controller
@@ -129,8 +129,8 @@ if __name__ == '__main__':
image_receive_thread.start()
# television: obtain hand pose data from the XR device and transmit the robot's head camera image to the XR device.
- tv_wrapper = TeleVisionWrapper(binocular=BINOCULAR, use_hand_tracking=args.xr_mode == 'hand', img_shape=tv_img_shape, img_shm_name=tv_img_shm.name,
- return_state_data=True, return_hand_rot_data = False)
+ tv_wrapper = TeleVuerWrapper(binocular=BINOCULAR, use_hand_tracking=args.xr_mode == 'hand', img_shape=tv_img_shape, img_shm_name=tv_img_shm.name,
+ return_state_data=True, return_hand_rot_data = False)
# arm
if args.arm == 'G1_29':
@@ -411,6 +411,8 @@ if __name__ == '__main__':
wrist_img_shm.close()
if args.record:
recorder.close()
+ if args.sim:
+ sim_state_subscriber.stop_subscribe()
listen_keyboard_thread.join()
logger_mp.info("Finally, exiting program...")
exit(0)
diff --git a/teleop/televuer b/teleop/televuer
new file mode 160000
index 0000000..34f4475
--- /dev/null
+++ b/teleop/televuer
@@ -0,0 +1 @@
+Subproject commit 34f4475fca12166d2c52f2469385a851f614fd4e
diff --git a/teleop/utils/data/-home-unitree-Code-unitree-opject-unitree_sim_isaaclab-.txt b/teleop/utils/data/-home-unitree-Code-unitree-opject-unitree_sim_isaaclab-.txt
deleted file mode 100644
index 5e55b46..0000000
--- a/teleop/utils/data/-home-unitree-Code-unitree-opject-unitree_sim_isaaclab-.txt
+++ /dev/null
@@ -1 +0,0 @@
-/home/unitree/Code/unitree-opject/unitree_sim_isaaclab/data
\ No newline at end of file
diff --git a/teleop/utils/sim_state_topic.py b/teleop/utils/sim_state_topic.py
index dc5db54..0dd7947 100644
--- a/teleop/utils/sim_state_topic.py
+++ b/teleop/utils/sim_state_topic.py
@@ -13,6 +13,9 @@ from typing import Any, Dict, Optional
from unitree_sdk2py.core.channel import ChannelSubscriber, ChannelFactoryInitialize
from unitree_sdk2py.idl.std_msgs.msg.dds_ import String_
+import logging_mp
+logger_mp = logging_mp.get_logger(__name__)
+
class SharedMemoryManager:
"""Shared memory manager"""
@@ -56,7 +59,7 @@ class SharedMemoryManager:
json_bytes = json_str.encode('utf-8')
if len(json_bytes) > self.size - 8: # reserve 8 bytes for length and timestamp
- print(f"Warning: Data too large for shared memory ({len(json_bytes)} > {self.size - 8})")
+ logger_mp.warning(f"Data too large for shared memory ({len(json_bytes)} > {self.size - 8})")
return False
# write timestamp (4 bytes) and data length (4 bytes)
@@ -69,7 +72,7 @@ class SharedMemoryManager:
return True
except Exception as e:
- print(f"Error writing to shared memory: {e}")
+ logger_mp.error(f"Error writing to shared memory: {e}")
return False
def read_data(self) -> Optional[Dict[str, Any]]:
@@ -94,7 +97,7 @@ class SharedMemoryManager:
return data
except Exception as e:
- print(f"Error reading from shared memory: {e}")
+ logger_mp.error(f"Error reading from shared memory: {e}")
return None
def get_name(self) -> str:
@@ -136,47 +139,46 @@ class SimStateSubscriber:
# initialize shared memory
self._setup_shared_memory()
- print(f"[SimStateSubscriber] Initialized with shared memory: {shm_name}")
+ logger_mp.info(f"[SimStateSubscriber] Initialized with shared memory: {shm_name}")
def _setup_shared_memory(self):
"""Setup shared memory"""
try:
self.shared_memory = SharedMemoryManager(self.shm_name, self.shm_size)
- print(f"[SimStateSubscriber] Shared memory setup successfully")
+ logger_mp.info(f"[SimStateSubscriber] Shared memory setup successfully")
except Exception as e:
- print(f"[SimStateSubscriber] Failed to setup shared memory: {e}")
+ logger_mp.error(f"[SimStateSubscriber] Failed to setup shared memory: {e}")
def _message_handler(self, msg: String_):
"""Handle received messages"""
try:
# parse the received message
data = json.loads(msg.data)
- # print(f"[SimStateSubscriber] Received message: {data}")
# write to shared memory
if self.shared_memory:
self.shared_memory.write_data(data)
except Exception as e:
- print(f"[SimStateSubscriber] Error processing message: {e}")
+ logger_mp.error(f"[SimStateSubscriber] Error processing message: {e}")
def _subscribe_loop(self):
"""Subscribe loop thread"""
- print(f"[SimStateSubscriber] Subscribe thread started")
+ logger_mp.info(f"[SimStateSubscriber] Subscribe thread started")
while self.running:
try:
time.sleep(0.001) # keep thread alive
except Exception as e:
- print(f"[SimStateSubscriber] Error in subscribe loop: {e}")
+ logger_mp.error(f"[SimStateSubscriber] Error in subscribe loop: {e}")
time.sleep(0.01)
- print(f"[SimStateSubscriber] Subscribe thread stopped")
+ logger_mp.info(f"[SimStateSubscriber] Subscribe thread stopped")
def start_subscribe(self):
"""Start subscribing"""
if self.running:
- print(f"[SimStateSubscriber] Already running")
+ logger_mp.info(f"[SimStateSubscriber] Already running")
return
try:
@@ -185,31 +187,34 @@ class SimStateSubscriber:
self.subscriber.Init(self._message_handler, 10)
self.running = True
- print(f"[SimStateSubscriber] Subscriber initialized")
+ logger_mp.info(f"[SimStateSubscriber] Subscriber initialized")
# start subscribe thread
self.subscribe_thread = threading.Thread(target=self._subscribe_loop)
self.subscribe_thread.daemon = True
self.subscribe_thread.start()
- print(f"[SimStateSubscriber] Started subscribing to rt/sim_state_cmd")
+ logger_mp.info(f"[SimStateSubscriber] Started subscribing to rt/sim_state_cmd")
except Exception as e:
- print(f"[SimStateSubscriber] Failed to start subscribing: {e}")
+ logger_mp.error(f"[SimStateSubscriber] Failed to start subscribing: {e}")
self.running = False
def stop_subscribe(self):
"""Stop subscribing"""
if not self.running:
return
-
- print(f"[SimStateSubscriber] Stopping subscriber...")
+
+ logger_mp.info(f"[SimStateSubscriber] Stopping subscriber...")
self.running = False
# wait for thread to finish
if self.subscribe_thread:
self.subscribe_thread.join(timeout=1.0)
-
- print(f"[SimStateSubscriber] Subscriber stopped")
+
+ if self.shared_memory:
+ # cleanup shared memory
+ self.shared_memory.cleanup()
+ logger_mp.info(f"[SimStateSubscriber] Subscriber stopped")
def read_data(self) -> Optional[Dict[str, Any]]:
"""Read data from shared memory
@@ -222,7 +227,7 @@ class SimStateSubscriber:
return self.shared_memory.read_data()
return None
except Exception as e:
- print(f"[SimStateSubscriber] Error reading data: {e}")
+ logger_mp.error(f"[SimStateSubscriber] Error reading data: {e}")
return None
def is_running(self) -> bool:
@@ -265,7 +270,7 @@ def start_sim_state_subscribe(shm_name: str = "sim_state_cmd_data", shm_size: in
# if __name__ == "__main__":
# # example usage
-# print("Starting sim state subscriber...")
+# logger_mp.info("Starting sim state subscriber...")
# ChannelFactoryInitialize(0)
# # create and start subscriber
# subscriber = start_sim_state_subscribe()
@@ -275,11 +280,11 @@ def start_sim_state_subscribe(shm_name: str = "sim_state_cmd_data", shm_size: in
# while True:
# data = subscriber.read_data()
# if data:
-# print(f"Read data: {data}")
+# logger_mp.info(f"Read data: {data}")
# time.sleep(1)
# except KeyboardInterrupt:
-# print("\nInterrupted by user")
+# logger_mp.warning("\nInterrupted by user")
# finally:
# subscriber.stop_subscribe()
-# print("Subscriber stopped")
\ No newline at end of file
+# logger_mp.info("Subscriber stopped")
\ No newline at end of file