You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

182 lines
5.9 KiB

# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
from dataclasses import dataclass
from io import BytesIO
from typing import Any, Callable, Dict

import torch
import zmq
class TorchSerializer:
    """Byte-level (de)serialization of Python objects via torch.save/torch.load."""

    @staticmethod
    def to_bytes(data: dict) -> bytes:
        """Serialize *data* into raw bytes using torch's pickle-based format."""
        with BytesIO() as buf:
            torch.save(data, buf)
            return buf.getvalue()

    @staticmethod
    def from_bytes(data: bytes) -> dict:
        """Inverse of :meth:`to_bytes`: rebuild the original object from bytes.

        NOTE(review): ``weights_only=False`` deserializes arbitrary pickled
        objects — only safe when the peer is trusted.
        """
        return torch.load(BytesIO(data), weights_only=False)
@dataclass
class EndpointHandler:
    """Pairs an endpoint's callable with whether it expects a request payload."""

    # Function invoked when the endpoint is hit.
    handler: Callable
    # If True, the handler is called with the request's "data" dict;
    # if False, it is called with no arguments (see BaseInferenceServer.run).
    requires_input: bool = True
class BaseInferenceServer:
    """
    An inference server that spins up a ZeroMQ REP socket and listens for
    incoming requests. Custom endpoints can be added with `register_endpoint`.
    """

    def __init__(self, host: str = "*", port: int = 5555):
        """Bind a REP socket on tcp://host:port and register built-in endpoints."""
        self.running = True
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REP)
        self.socket.bind(f"tcp://{host}:{port}")
        self._endpoints: dict[str, EndpointHandler] = {}
        # Built-in endpoints: health check and remote shutdown.
        self.register_endpoint("ping", self._handle_ping, requires_input=False)
        self.register_endpoint("kill", self._kill_server, requires_input=False)

    def _kill_server(self):
        """
        Kill the server.

        Clears the run flag; the serve loop exits after answering this request.
        """
        self.running = False

    def _handle_ping(self) -> dict:
        """
        Simple ping handler that returns a success message.
        """
        return {"status": "ok", "message": "Server is running"}

    def register_endpoint(self, name: str, handler: Callable, requires_input: bool = True):
        """
        Register a new endpoint to the server.

        Args:
            name: The name of the endpoint.
            handler: The handler function that will be called when the endpoint is hit.
            requires_input: Whether the handler requires input data (the request's
                "data" dict); if False the handler is called with no arguments.
        """
        self._endpoints[name] = EndpointHandler(handler, requires_input)

    def run(self):
        """Serve requests until `running` is cleared, then release ZMQ resources."""
        addr = self.socket.getsockopt_string(zmq.LAST_ENDPOINT)
        print(f"Server is ready and listening on {addr}")
        while self.running:
            try:
                message = self.socket.recv()
                request = TorchSerializer.from_bytes(message)
                # Unknown/absent endpoint falls back to "get_action".
                endpoint = request.get("endpoint", "get_action")
                if endpoint not in self._endpoints:
                    raise ValueError(f"Unknown endpoint: {endpoint}")
                handler = self._endpoints[endpoint]
                result = (
                    handler.handler(request.get("data", {}))
                    if handler.requires_input
                    else handler.handler()
                )
                self.socket.send(TorchSerializer.to_bytes(result))
            except Exception as e:
                print(f"Error in server: {e}")
                print(traceback.format_exc())
                # FIX: a REP socket may only send after a successful recv.
                # If the failure happened in recv itself, the original
                # unconditional send raised a second ZMQError and killed the
                # serve loop — guard the error reply.
                try:
                    self.socket.send(b"ERROR")
                except zmq.ZMQError:
                    pass
        # FIX: the original leaked the socket/context after shutdown
        # (e.g. when the "kill" endpoint stops the loop).
        self.socket.close()
        self.context.term()
class BaseInferenceClient:
    """
    ZeroMQ REQ client for talking to a BaseInferenceServer.

    Args:
        host: Server hostname.
        port: Server port.
        timeout_ms: Send/receive timeout in milliseconds.
    """

    def __init__(self, host: str = "localhost", port: int = 5555, timeout_ms: int = 15000):
        self.context = zmq.Context()
        self.host = host
        self.port = port
        self.timeout_ms = timeout_ms
        self._init_socket()

    def _init_socket(self):
        """Initialize or reinitialize the socket with current settings"""
        # FIX: close any previous socket before replacing it so reconnects
        # (see ping()) don't leak ZMQ sockets.
        old = getattr(self, "socket", None)
        if old is not None:
            old.close(linger=0)
        self.socket = self.context.socket(zmq.REQ)
        # FIX: `timeout_ms` was stored but never applied, so a dead server made
        # recv() block forever and ping()'s ZMQError-recovery path could never
        # fire. Apply it to both directions, and don't block process exit on
        # unsent messages.
        self.socket.setsockopt(zmq.RCVTIMEO, self.timeout_ms)
        self.socket.setsockopt(zmq.SNDTIMEO, self.timeout_ms)
        self.socket.setsockopt(zmq.LINGER, 0)
        self.socket.connect(f"tcp://{self.host}:{self.port}")

    def ping(self) -> bool:
        """Return True if the server answers a ping, False otherwise."""
        try:
            self.call_endpoint("ping", requires_input=False)
            return True
        except zmq.error.ZMQError:
            self._init_socket()  # Recreate socket for next attempt
            return False

    def kill_server(self):
        """
        Kill the server.
        """
        self.call_endpoint("kill", requires_input=False)

    def call_endpoint(
        self, endpoint: str, data: dict | None = None, requires_input: bool = True
    ) -> dict:
        """
        Call an endpoint on the server.

        Args:
            endpoint: The name of the endpoint.
            data: The input data for the endpoint.
            requires_input: Whether the endpoint requires input data.

        Raises:
            RuntimeError: If the server replied with its error sentinel.
            zmq.error.ZMQError: On send/receive failure or timeout.
        """
        request: dict = {"endpoint": endpoint}
        if requires_input:
            request["data"] = data
        self.socket.send(TorchSerializer.to_bytes(request))
        message = self.socket.recv()
        if message == b"ERROR":
            raise RuntimeError("Server error")
        return TorchSerializer.from_bytes(message)

    def __del__(self):
        """Cleanup resources on destruction"""
        # FIX: if __init__ raised before assigning these attributes, the
        # original __del__ itself raised AttributeError during GC.
        sock = getattr(self, "socket", None)
        if sock is not None:
            sock.close()
        ctx = getattr(self, "context", None)
        if ctx is not None:
            ctx.term()
class ExternalRobotInferenceClient(BaseInferenceClient):
    """
    Client for communicating with the RealRobotServer
    """

    def set_observation(self, observation: dict[str, Any]):
        """Push an observation dict to the server's `set_observation` endpoint."""
        self.call_endpoint("set_observation", data=observation)

    def get_action(self, time: float | None = None) -> Dict[str, Any]:
        """
        Get the action from the server.
        The exact definition of the observations is defined
        by the policy, which contains the modalities configuration.
        """
        payload = {"time": time}
        return self.call_endpoint("get_action", data=payload)

    def get_modality_config(self) -> dict[str, Any]:
        """Fetch the modality configuration from the server."""
        # NOTE(review): requires_input defaults to True, so this sends
        # data=None — presumably the server handler tolerates that; confirm.
        return self.call_endpoint("get_modality_config")