Browse Source

[update] 1. Data recording using multithreading (.json not yet implemented) 2. Optimize rerun (not fully implemented yet).

main
silencht 1 year ago
parent
commit
b56711f7a5
  1. 3
      .gitignore
  2. 4
      teleop/open_television/tv_wrapper.py
  3. 2
      teleop/robot_control/robot_arm.py
  4. 21
      teleop/robot_control/robot_hand_unitree.py
  5. 11
      teleop/teleop_hand_and_arm.py
  6. 113
      teleop/utils/episode_writer.py
  7. 155
      teleop/utils/rerun_visualizer.py

3
.gitignore

@ -19,5 +19,4 @@ __MACOSX/
!real_right_hand.jpg
!wrist_and_ring_mount.png
teleop/data/
teleop/utils/episode_0*
teleop/utils/data/episode_0*
episode_0*

4
teleop/open_television/tv_wrapper.py

@ -139,8 +139,8 @@ class TeleVisionWrapper:
head_rmat = head_mat[:3, :3]
# The origin of the coordinate for IK Solve is the WAIST joint motor. You can use teleop/robot_control/robot_arm_ik.py Unit_Test to check it.
# The origin of the coordinate of unitree_left_wrist is HEAD. So it is necessary to translate the origin of unitree_left_wrist from HEAD to WAIST.
unitree_left_wrist[0, 3] +=0.20
unitree_right_wrist[0,3] +=0.20
unitree_left_wrist[0, 3] +=0.15
unitree_right_wrist[0,3] +=0.15
unitree_left_wrist[2, 3] +=0.45
unitree_right_wrist[2,3] +=0.45

2
teleop/robot_control/robot_arm.py

@ -41,7 +41,7 @@ class G1_29_ArmController:
self.q_target = np.zeros(14)
self.tauff_target = np.zeros(14)
self.kp_high = 100.0
self.kp_high = 300.0
self.kd_high = 3.0
self.kp_low = 80.0
self.kd_low = 3.0

21
teleop/robot_control/robot_hand_unitree.py

@ -18,6 +18,7 @@ from multiprocessing import Process, shared_memory, Array, Lock
parent2_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(parent2_dir)
from teleop.robot_control.hand_retargeting import HandRetargeting, HandType
from teleop.utils.weighted_moving_filter import WeightedMovingFilter
unitree_tip_indices = [4, 9, 14] # [thumb, index, middle] in OpenXR
@ -228,7 +229,7 @@ kTopicGripperState = "rt/unitree_actuator/state"
class Gripper_Controller:
def __init__(self, left_hand_array, right_hand_array, dual_gripper_data_lock = None, dual_gripper_state_out = None, dual_gripper_action_out = None,
fps = 250.0, Unit_Test = False):
filter = True, fps = 200.0, Unit_Test = False):
"""
[note] A *_array type parameter requires using a multiprocessing Array, because it needs to be passed to the internal child process
@ -251,6 +252,10 @@ class Gripper_Controller:
self.fps = fps
self.Unit_Test = Unit_Test
if filter:
self.smooth_filter = WeightedMovingFilter(np.array([0.5, 0.3, 0.2]), Gripper_Num_Motors)
else:
self.smooth_filter = None
if self.Unit_Test:
ChannelFactoryInitialize(0)
@ -303,8 +308,8 @@ class Gripper_Controller:
self.running = True
DELTA_GRIPPER_CMD = 0.18 # The motor rotates 5.4 radians, the clamping jaw slide open 9 cm, so 0.6 rad <==> 1 cm, 0.18 rad <==> 3 mm
THUMB_INDEX_DISTANCE_MIN = 0.030 # Assuming a minimum Euclidean distance is 3 cm between thumb and index.
THUMB_INDEX_DISTANCE_MAX = 0.120 # Assuming a maximum Euclidean distance is 12 cm between thumb and index.
THUMB_INDEX_DISTANCE_MIN = 0.05 # Assuming a minimum Euclidean distance is 5 cm between thumb and index.
THUMB_INDEX_DISTANCE_MAX = 0.07 # Assuming a maximum Euclidean distance is 7 cm between thumb and index.
LEFT_MAPPED_MIN = 0.0 # The minimum initial motor position when the gripper closes at startup.
RIGHT_MAPPED_MIN = 0.0 # The minimum initial motor position when the gripper closes at startup.
# The maximum initial motor position when the gripper closes before calibration (with the rail stroke calculated as 0.6 cm/rad * 9 rad = 5.4 cm).
@ -315,7 +320,7 @@ class Gripper_Controller:
dq = 0.0
tau = 0.0
kp = 5.0
kp = 5.00
kd = 0.05
# initialize gripper cmd msg
self.gripper_msg = MotorCmds_()
@ -335,11 +340,11 @@ class Gripper_Controller:
if not np.all(left_hand_mat == 0.0): # if hand data has been initialized.
left_euclidean_distance = np.linalg.norm(left_hand_mat[unitree_gripper_indices[1]] - left_hand_mat[unitree_gripper_indices[0]])
right_euclidean_distance = np.linalg.norm(right_hand_mat[unitree_gripper_indices[1]] - left_hand_mat[unitree_gripper_indices[0]])
right_euclidean_distance = np.linalg.norm(right_hand_mat[unitree_gripper_indices[1]] - right_hand_mat[unitree_gripper_indices[0]])
# Linear mapping from [0, THUMB_INDEX_DISTANCE_MAX] to gripper action range
left_target_action = np.interp(left_euclidean_distance, [THUMB_INDEX_DISTANCE_MIN, THUMB_INDEX_DISTANCE_MAX], [LEFT_MAPPED_MIN, LEFT_MAPPED_MAX])
right_target_action = np.interp(right_euclidean_distance, [THUMB_INDEX_DISTANCE_MIN, THUMB_INDEX_DISTANCE_MAX], [RIGHT_MAPPED_MIN, RIGHT_MAPPED_MAX])
# else: # TEST WITHOUT APPLE VISION PRO
# else: # TEST WITHOUT XR DEVICE
# current_time = time.time()
# period = 2.5
# import math
@ -357,6 +362,10 @@ class Gripper_Controller:
dual_gripper_action = np.array([right_actual_action, left_actual_action])
if self.smooth_filter:
self.smooth_filter.add_data(dual_gripper_action)
dual_gripper_action = self.smooth_filter.filtered_data
if dual_gripper_state_out and dual_gripper_action_out:
with dual_hand_data_lock:
dual_gripper_state_out[:] = dual_gripper_state - np.array([RIGHT_MAPPED_MIN, LEFT_MAPPED_MIN])

11
teleop/teleop_hand_and_arm.py

@ -97,7 +97,7 @@ if __name__ == '__main__':
gripper_ctrl = Gripper_Controller(left_hand_array, right_hand_array, dual_gripper_data_lock, dual_gripper_state_array, dual_gripper_action_array)
if args.record:
recorder = EpisodeWriter(task_dir = args.task_dir, frequency = args.frequency, log = True)
recorder = EpisodeWriter(task_dir = args.task_dir, frequency = args.frequency, rerun_log = True)
try:
user_input = input("Please enter the start signal (enter 'r' to start the subsequent program):\n")
@ -156,10 +156,10 @@ if __name__ == '__main__':
right_hand_action = dual_hand_action_array[-7:]
else:
with dual_gripper_data_lock:
left_hand_state = dual_gripper_state_array[1]
right_hand_state = dual_gripper_state_array[0]
left_hand_action = dual_gripper_action_array[1]
right_hand_action = dual_gripper_action_array[0]
left_hand_state = [dual_gripper_state_array[1]]
right_hand_state = [dual_gripper_state_array[0]]
left_hand_action = [dual_gripper_action_array[1]]
right_hand_action = [dual_gripper_action_array[0]]
# head image
current_tv_image = tv_img_array.copy()
# wrist image
@ -247,5 +247,6 @@ if __name__ == '__main__':
if WRIST:
wrist_img_shm.unlink()
wrist_img_shm.close()
recorder.close()
print("Finally, exiting program...")
exit(0)

113
teleop/utils/episode_writer.py

@ -5,10 +5,11 @@ import datetime
import numpy as np
import time
from .rerun_visualizer import RerunLogger
from queue import Queue, Empty
from threading import Thread
class EpisodeWriter():
def __init__(self, task_dir, frequency=30, image_size=[640, 480], log = False):
def __init__(self, task_dir, frequency=30, image_size=[640, 480], rerun_log = True):
"""
image_size: [width, height]
"""
@ -17,27 +18,39 @@ class EpisodeWriter():
self.frequency = frequency
self.image_size = image_size
self.rerun_log = rerun_log
if self.rerun_log:
print("==> RerunLogger initializing...\n")
self.rerun_logger = RerunLogger(prefix="online/", IdxRangeBoundary = 60, memory_limit = "300MB")
print("==> RerunLogger initializing ok.\n")
self.data = {}
self.episode_data = []
self.item_id = -1
self.episode_id = -1
self.log = log
if os.path.exists(self.task_dir):
episode_dirs = [episode_dir for episode_dir in os.listdir(self.task_dir) if 'episode_' in episode_dir]
episode_last = sorted(episode_dirs)[-1] if len(episode_dirs) > 0 else None
self.episode_id = 0 if episode_last is None else int(episode_last.split('_')[-1])
print(f"==> task_dir directory already exist, now self.episode_id is{self.episode_id}\n")
print(f"==> task_dir directory already exist, now self.episode_id is:{self.episode_id}\n")
else:
os.makedirs(self.task_dir)
print(f"==> episode directory does not exist, now create one.\n")
self.data_info()
self.text_desc()
# Initialize the queue and worker thread
self.item_data_queue = Queue(maxsize=100)
self.stop_worker = False
self.worker_thread = Thread(target=self.process_queue)
self.worker_thread.start()
print("==> EpisodeWriter initialized successfully.\n")
def data_info(self, version='1.0.0', date=None, author=None):
self.info = {
"version": "1.0.0" if version is None else version,
"date": "" if date is None else datetime.date.today().strftime('%Y-%m-%d'),
"date": datetime.date.today().strftime('%Y-%m-%d') if date is None else date,
"author": "unitree" if author is None else author,
"image": {"width":self.image_size[0], "height":self.image_size[1], "fps":self.frequency},
"depth": {"width":self.image_size[0], "height":self.image_size[1], "fps":self.frequency},
@ -85,63 +98,91 @@ class EpisodeWriter():
os.makedirs(self.color_dir, exist_ok=True)
os.makedirs(self.depth_dir, exist_ok=True)
os.makedirs(self.audio_dir, exist_ok=True)
if self.log:
self.online_logger = RerunLogger(prefix="online/", IdxRangeBoundary = 60)
if self.rerun_log:
self.online_logger = RerunLogger(prefix="online/", IdxRangeBoundary = 60, memory_limit="300MB")
def add_item(self, colors, depths=None, states=None, actions=None, tactiles=None, audios=None):
# Increment the item ID
self.item_id += 1
# Create the item data dictionary
item_data = {
'idx': self.item_id,
'colors': {"color_0": None},
'depths': {"depth_0": None},
'states': {'left_arm':{}, 'right_arm':{}, 'left_hand':{}, 'right_hand':{}, 'body': {}},
'actions': {'left_arm':{}, 'right_arm':{}, 'left_hand':{}, 'right_hand':{}, 'body': {}},
'tactiles': {'left_hand':[], 'right_hand':[]},
'audios': { "mic0": None},
'colors': colors,
'depths': depths,
'states': states,
'actions': actions,
'tactiles': tactiles,
'audios': audios,
}
# save images
if colors is not None:
for idx, (_, color) in enumerate(colors.items()):
color_key = f'color_{idx}'
color_name = f'{str(self.item_id).zfill(6)}_{color_key}.jpg'
cv2.imwrite(os.path.join(self.color_dir, color_name), color)
# Enqueue the item data
self.item_data_queue.put(item_data)
def process_queue(self):
while not self.stop_worker or not self.item_data_queue.empty():
try:
item_data = self.item_data_queue.get(timeout=1)
try:
self._process_item_data(item_data)
except Exception as e:
print(f"Error processing item_data (idx={item_data['idx']}): {e}")
self.item_data_queue.task_done()
except Empty:
continue
def _process_item_data(self, item_data):
idx = item_data['idx']
colors = item_data.get('colors', {})
depths = item_data.get('depths', {})
audios = item_data.get('audios', {})
# Save images
if colors:
for idx_color, (color_key, color) in enumerate(colors.items()):
color_name = f'{str(idx).zfill(6)}_{color_key}.jpg'
if not cv2.imwrite(os.path.join(self.color_dir, color_name), color):
print(f"Failed to save color image.")
item_data['colors'][color_key] = os.path.join('colors', color_name)
# save depths
if depths is not None:
for idx, (_, depth) in enumerate(depths.items()):
depth_key = f'depth_{idx}'
depth_name = f'{str(self.item_id).zfill(6)}_{depth_key}.png'
cv2.imwrite(os.path.join(self.depth_dir, depth_name), depth)
# Save depths
if depths:
for idx_depth, (depth_key, depth) in enumerate(depths.items()):
depth_name = f'{str(idx).zfill(6)}_{depth_key}.jpg'
if not cv2.imwrite(os.path.join(self.depth_dir, depth_name), depth):
print(f"Failed to save depth image.")
item_data['depths'][depth_key] = os.path.join('depths', depth_name)
# save audios
if audios is not None:
# Save audios
if audios:
for mic, audio in audios.items():
audio_name = f'audio_{str(self.item_id).zfill(6)}_{mic}.npy'
audio_name = f'audio_{str(idx).zfill(6)}_{mic}.npy'
np.save(os.path.join(self.audio_dir, audio_name), audio.astype(np.int16))
item_data['audios'][mic] = os.path.join('audios', audio_name)
item_data['states'] = states
item_data['actions'] = actions
item_data['tactiles'] = tactiles
# Update episode data
self.episode_data.append(item_data)
if self.log:
# Log data if necessary
if self.rerun_log:
current_record_time = time.time()
print(f"==> episode_id:{self.episode_id} item_id:{self.item_id} current_time:{current_record_time}")
self.online_logger.log_item_data(item_data)
self.rerun_logger.log_item_data(item_data)
def save_episode(self):
"""
with open("./hmm.json",'r',encoding='utf-8') as json_file:
model=json.load(json_file)
"""
# Wait for the queue to be processed
self.item_data_queue.join()
# save
self.data['info'] = self.info
self.data['text'] = self.text
self.data['data'] = self.episode_data
with open(self.json_path,'w',encoding='utf-8') as jsonf:
jsonf.write(json.dumps(self.data, indent=4, ensure_ascii=False))
def close(self):
self.item_data_queue.join()
# Signal the worker thread to stop and join the thread
self.stop_worker = True
self.worker_thread.join()

155
teleop/utils/rerun_visualizer.py

@ -70,31 +70,28 @@ class RerunEpisodeReader:
return audio_data
class RerunLogger:
def __init__(self, prefix = "", IdxRangeBoundary = 30, memory_limit = "200MB"):
rr.init(datetime.now().strftime("Runtime_%Y%m%d_%H%M%S"))
rr.spawn(memory_limit = memory_limit)
def __init__(self, prefix = "", IdxRangeBoundary = 30, memory_limit = None):
self.prefix = prefix
self.IdxRangeBoundary = IdxRangeBoundary
rr.init(datetime.now().strftime("Runtime_%Y%m%d_%H%M%S"))
if memory_limit:
rr.spawn(memory_limit = memory_limit, hide_welcome_screen = True)
else:
rr.spawn(hide_welcome_screen = True)
# Set up blueprint for live visualization
if self.IdxRangeBoundary:
self.setup_blueprint()
def setup_blueprint(self):
views = []
data_plot_paths = [
f"{self.prefix}left_arm",
f"{self.prefix}right_arm",
f"{self.prefix}left_hand",
f"{self.prefix}right_hand"
]
image_plot_paths = [
f"{self.prefix}colors/color_0",
f"{self.prefix}colors/color_1",
f"{self.prefix}colors/color_2",
f"{self.prefix}colors/color_3"
]
views = []
for plot_path in data_plot_paths:
view = rrb.TimeSeriesView(
origin = plot_path,
@ -109,24 +106,32 @@ class RerunLogger:
)
views.append(view)
for plot_path in image_plot_paths:
view = rrb.Spatial2DView(
origin = plot_path,
time_ranges=[
rrb.VisibleTimeRange(
"idx",
start = rrb.TimeRangeBoundary.cursor_relative(seq = -self.IdxRangeBoundary),
end = rrb.TimeRangeBoundary.cursor_relative(),
)
],
)
views.append(view)
# image_plot_paths = [
# f"{self.prefix}colors/color_0",
# f"{self.prefix}colors/color_1",
# f"{self.prefix}colors/color_2",
# f"{self.prefix}colors/color_3"
# ]
# for plot_path in image_plot_paths:
# view = rrb.Spatial2DView(
# origin = plot_path,
# time_ranges=[
# rrb.VisibleTimeRange(
# "idx",
# start = rrb.TimeRangeBoundary.cursor_relative(seq = -self.IdxRangeBoundary),
# end = rrb.TimeRangeBoundary.cursor_relative(),
# )
# ],
# )
# views.append(view)
grid = rrb.Grid(contents = views,
grid_columns=2,
column_shares=[1, 1],
row_shares=[1, 1, 0.5],
row_shares=[1, 1],
)
views.append(rr.blueprint.SelectionPanel(state=rrb.PanelState.Collapsed))
views.append(rr.blueprint.TimePanel(state=rrb.PanelState.Collapsed))
rr.send_blueprint(grid)
@ -149,30 +154,30 @@ class RerunLogger:
for idx, val in enumerate(values):
rr.log(f"{self.prefix}{part}/actions/qpos/{idx}", rr.Scalar(val))
# Log colors (images)
colors = item_data.get('colors', {}) or {}
for color_key, color_val in colors.items():
if color_val is not None:
rr.log(f"{self.prefix}colors/{color_key}", rr.Image(color_val))
# Log depths (images)
depths = item_data.get('depths', {}) or {}
for depth_key, depth_val in depths.items():
if depth_val is not None:
# rr.log(f"{self.prefix}depths/{depth_key}", rr.Image(depth_val))
pass # Handle depth if needed
# Log tactile if needed
tactiles = item_data.get('tactiles', {}) or {}
for hand, tactile_vals in tactiles.items():
if tactile_vals is not None:
pass # Handle tactile if needed
# Log audios if needed
audios = item_data.get('audios', {}) or {}
for audio_key, audio_val in audios.items():
if audio_val is not None:
pass # Handle audios if needed
# # Log colors (images)
# colors = item_data.get('colors', {}) or {}
# for color_key, color_val in colors.items():
# if color_val is not None:
# rr.log(f"{self.prefix}colors/{color_key}", rr.Image(color_val))
# # Log depths (images)
# depths = item_data.get('depths', {}) or {}
# for depth_key, depth_val in depths.items():
# if depth_val is not None:
# # rr.log(f"{self.prefix}depths/{depth_key}", rr.Image(depth_val))
# pass # Handle depth if needed
# # Log tactile if needed
# tactiles = item_data.get('tactiles', {}) or {}
# for hand, tactile_vals in tactiles.items():
# if tactile_vals is not None:
# pass # Handle tactile if needed
# # Log audios if needed
# audios = item_data.get('audios', {}) or {}
# for audio_key, audio_val in audios.items():
# if audio_val is not None:
# pass # Handle audios if needed
def log_episode_data(self, episode_data: list):
for item_data in episode_data:
@ -183,49 +188,57 @@ if __name__ == "__main__":
import gdown
import zipfile
import os
url = "https://drive.google.com/file/d/1f5UuFl1z_gaByg_7jDRj1_NxfJZh2evD/view?usp=sharing"
zip_file = "rerun_testdata.zip"
if not os.path.exists("episode_0006"):
zip_file_download_url = "https://drive.google.com/file/d/1f5UuFl1z_gaByg_7jDRj1_NxfJZh2evD/view?usp=sharing"
unzip_file_output_dir = "./testdata"
if not os.path.exists(os.path.join(unzip_file_output_dir, "episode_0006")):
if not os.path.exists(zip_file):
file_id = url.split('/')[5]
file_id = zip_file_download_url.split('/')[5]
gdown.download(id=file_id, output=zip_file, quiet=False)
print("download ok.")
if not os.path.exists(unzip_file_output_dir):
os.makedirs(unzip_file_output_dir)
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
zip_ref.extractall(".")
zip_ref.extractall(unzip_file_output_dir)
print("uncompress ok.")
os.remove(zip_file)
print("clean file ok.")
else:
print("rerun_testdata exists.")
episode_reader = RerunEpisodeReader(task_dir=".")
episode_data6 = episode_reader.return_episode_data(6)
episode_data8 = episode_reader.return_episode_data(8)
# Example 1: Offline Visualization
user_input = input("Please enter the start signal (enter 'off' to start the subsequent program):\n")
episode_reader = RerunEpisodeReader(task_dir = unzip_file_output_dir)
# TEST EXAMPLE 1 : OFFLINE DATA TEST
user_input = input("Please enter the start signal (enter 'off' or 'on' to start the subsequent program):\n")
if user_input.lower() == 'off':
episode_data6 = episode_reader.return_episode_data(6)
print("Starting offline visualization...")
offline_logger = RerunLogger(prefix="offline/")
offline_logger.log_episode_data(episode_data6)
print("Offline visualization completed.")
# Example 2: Online Visualization with Fixed Time Window
user_input = input("Please enter the start signal (enter 'on' to start the subsequent program):\n")
# TEST EXAMPLE 2 : ONLINE DATA TEST, SLIDE WINDOW SIZE IS 60, MEMORY LIMIT IS 50MB
if user_input.lower() == 'on':
episode_data8 = episode_reader.return_episode_data(8)
print("Starting online visualization with fixed idx size...")
online_logger = RerunLogger(prefix="online/", IdxRangeBoundary = 60)
for item_data in episode_data6:
online_logger.log_item_data(item_data)
time.sleep(0.033) # 30hz
print("Online visualization completed.")
# Example 3: Online Visualization with Fixed Time Window
user_input = input("Please enter the start signal (enter 'on' to start the subsequent program):\n")
if user_input.lower() == 'on':
print("Starting online visualization with fixed idx size...")
online_logger = RerunLogger(prefix="online/", IdxRangeBoundary = 60)
online_logger = RerunLogger(prefix="online/", IdxRangeBoundary = 60, memory_limit='50MB')
for item_data in episode_data8:
online_logger.log_item_data(item_data)
time.sleep(0.033) # 30hz
print("Online visualization completed.")
# # TEST DATA OF data_dir
# data_dir = "./data"
# episode_data_number = 10
# episode_reader2 = RerunEpisodeReader(task_dir = data_dir)
# user_input = input("Please enter the start signal (enter 'on' to start the subsequent program):\n")
# episode_data8 = episode_reader2.return_episode_data(episode_data_number)
# if user_input.lower() == 'on':
# # Example 2: Offline Visualization with Fixed Time Window
# print("Starting offline visualization with fixed idx size...")
# online_logger = RerunLogger(prefix="offline/", IdxRangeBoundary = 60)
# for item_data in episode_data8:
# online_logger.log_item_data(item_data)
# time.sleep(0.033) # 30hz
# print("Offline visualization completed.")
Loading…
Cancel
Save