Load LeIsaac directly in LeRobot with one line of code
EnvHub: Share LeIsaac environments through Hugging Face
EnvHub is Hugging Face's reproducible environment hub: spin up a packaged simulation with one line of code, experiment immediately, and publish your own tasks for the community.
LeIsaac offers EnvHub support so you can consume or share tasks with only a few commands.
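For example, loading the so101_pick_orange task from the Hub takes a single call (unpacked step by step in the usage example below):
from lerobot.envs.factory import make_env

envs_dict = make_env("LightwheelAI/leisaac_env:envs/so101_pick_orange.py", n_envs=1, trust_remote_code=True)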
Environment Setup
Run the following commands to set up your environment:
# Refer to Getting Started/Installation to install leisaac first
conda create -n leisaac_envhub python=3.11
conda activate leisaac_envhub
conda install -c "nvidia/label/cuda-12.8.1" cuda-toolkit
pip install -U torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128
pip install 'leisaac[isaaclab] @ git+https://github.com/LightwheelAI/leisaac.git#subdirectory=source/leisaac' --extra-index-url https://pypi.nvidia.com
# Install lerobot
pip install lerobot==0.4.1
# Pin numpy to a compatible version
pip install numpy==1.26.0
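Before moving on, a quick sanity check (a sketch, assuming the git install registers under the distribution name leisaac) that the pinned versions resolved correctly inside the leisaac_envhub environment:
# check_install.py - verify the pins above; expected values in comments
import numpy
import torch
from importlib.metadata import version

print("torch:", torch.__version__)     # expect 2.7.0 (+cu128 build)
print("numpy:", numpy.__version__)     # expect 1.26.0
print("lerobot:", version("lerobot"))  # expect 0.4.1
print("leisaac:", version("leisaac"))  # raises PackageNotFoundError if the install failed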
Usage Example
EnvHub exposes every LeIsaac-supported task through a uniform interface. The examples below load so101_pick_orange and demonstrate a random-action rollout and interactive teleoperation.
Random Action
Example code for envhub_random_action.py
# envhub_random_action.py
import torch

from lerobot.envs.factory import make_env

# Load from the hub
envs_dict = make_env("LightwheelAI/leisaac_env:envs/so101_pick_orange.py", n_envs=1, trust_remote_code=True)

# Access the environment
suite_name = next(iter(envs_dict))
sync_vector_env = envs_dict[suite_name][0]
# Retrieve the Isaac environment from the sync vector env
env = sync_vector_env.envs[0].unwrapped

# Use it like any gym environment
obs, info = env.reset()
try:
    while True:
        # Sample a random action and step the simulation
        action = torch.tensor(env.action_space.sample())
        obs, reward, terminated, truncated, info = env.step(action)
        if terminated or truncated:
            obs, info = env.reset()
except KeyboardInterrupt:
    pass
finally:
    env.close()
python envhub_random_action.py
You should see the SO101 arm swinging under purely random commands.
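If you want to inspect a task's interface before writing your own rollout, the loaded environment exposes the usual Gymnasium space attributes; a quick sketch (the exact space types depend on the task):
# Inspect the loaded task's interface
print(env.action_space)       # e.g. a Box over the SO101 joint targets
print(env.observation_space)  # the observation layout exposed by the task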
Teleoperation
LeRobot’s teleoperation stack can drive the simulated arm.
Connect the SO101 Leader controller and run the calibration command below.
lerobot-calibrate \
    --teleop.type=so101_leader \
    --teleop.port=/dev/ttyACM0 \
    --teleop.id=leader
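Before launching the full loop, you can optionally confirm the leader arm is responding; a minimal sketch, reusing the same port and id as in the script below:
# leader_check.py - hypothetical quick check of the leader connection
from lerobot.teleoperators import make_teleoperator_from_config, so101_leader

teleop = make_teleoperator_from_config(
    so101_leader.SO101LeaderConfig(port="/dev/ttyACM0", id="leader")
)
teleop.connect()
print(teleop.get_action())  # raw joint readings, keyed like "<joint>.pos"
teleop.disconnect()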
Then launch the teleop script.
Example code for envhub_teleop_example.py
# envhub_teleop_example.py
import logging
import time

import gymnasium as gym
from dataclasses import asdict, dataclass
from pprint import pformat

from lerobot.teleoperators import (  # noqa: F401
    Teleoperator,
    TeleoperatorConfig,
    make_teleoperator_from_config,
    so101_leader,
)
from lerobot.utils.robot_utils import busy_wait
from lerobot.utils.utils import init_logging
from lerobot.envs.factory import make_env


@dataclass
class TeleoperateConfig:
    teleop: TeleoperatorConfig
    env_name: str = "so101_pick_orange"
    fps: int = 60


@dataclass
class EnvWrap:
    env: gym.Env


def make_env_from_leisaac(env_name: str = "so101_pick_orange"):
    envs_dict = make_env(
        f'LightwheelAI/leisaac_env:envs/{env_name}.py',
        n_envs=1,
        trust_remote_code=True,
    )
    suite_name = next(iter(envs_dict))
    sync_vector_env = envs_dict[suite_name][0]
    env = sync_vector_env.envs[0].unwrapped
    return env


def teleop_loop(teleop: Teleoperator, env: gym.Env, fps: int):
    # LeIsaac helpers that convert leader joint states into env actions
    from leisaac.devices.action_process import preprocess_device_action
    from leisaac.assets.robots.lerobot import SO101_FOLLOWER_MOTOR_LIMITS
    from leisaac.utils.env_utils import dynamic_reset_gripper_effort_limit_sim

    env_wrap = EnvWrap(env=env)
    obs, info = env.reset()
    while True:
        loop_start = time.perf_counter()
        if env.cfg.dynamic_reset_gripper_effort_limit:
            dynamic_reset_gripper_effort_limit_sim(env, 'so101leader')
        # Read the leader arm and map its joint state onto the follower's motor limits
        raw_action = teleop.get_action()
        processed_action = preprocess_device_action(
            dict(
                so101_leader=True,
                joint_state={k.removesuffix(".pos"): v for k, v in raw_action.items()},
                motor_limits=SO101_FOLLOWER_MOTOR_LIMITS,
            ),
            env_wrap,
        )
        obs, reward, terminated, truncated, info = env.step(processed_action)
        if terminated or truncated:
            obs, info = env.reset()
        # Sleep the remainder of the control period to hold the target fps
        dt_s = time.perf_counter() - loop_start
        busy_wait(1 / fps - dt_s)
        loop_s = time.perf_counter() - loop_start
        print(f"\ntime: {loop_s * 1e3:.2f}ms ({1 / loop_s:.0f} Hz)")


def teleoperate(cfg: TeleoperateConfig):
    init_logging()
    logging.info(pformat(asdict(cfg)))
    teleop = make_teleoperator_from_config(cfg.teleop)
    env = make_env_from_leisaac(cfg.env_name)
    teleop.connect()
    # Some tasks (e.g. bi_so101_fold_cloth) require an explicit initialize() call
    if hasattr(env, 'initialize'):
        env.initialize()
    try:
        teleop_loop(teleop=teleop, env=env, fps=cfg.fps)
    except KeyboardInterrupt:
        pass
    finally:
        teleop.disconnect()
        env.close()


def main():
    teleoperate(TeleoperateConfig(
        teleop=so101_leader.SO101LeaderConfig(
            port="/dev/ttyACM0",
            id='leader',
            use_degrees=False,
        ),
        env_name="so101_pick_orange",
        fps=60,
    ))


if __name__ == "__main__":
    main()
python envhub_teleop_example.py
Running the script lets you operate the simulated arm using the physical Leader device.
Additional Notes
We keep EnvHub coverage aligned with LeIsaac's task list. Currently supported tasks:
- so101_pick_orange
- so101_lift_cube
- so101_clean_toytable
- bi_so101_fold_cloth
Refer to the LightwheelAI/leisaac_env repository for our available environments.
Switch tasks by targeting a different script when calling make_env, for example:
envs_dict_pick_orange = make_env("LightwheelAI/leisaac_env:envs/so101_pick_orange.py", n_envs=1, trust_remote_code=True)
envs_dict_lift_cube = make_env("LightwheelAI/leisaac_env:envs/so101_lift_cube.py", n_envs=1, trust_remote_code=True)
envs_dict_clean_toytable = make_env("LightwheelAI/leisaac_env:envs/so101_clean_toytable.py", n_envs=1, trust_remote_code=True)
envs_dict_fold_cloth = make_env("LightwheelAI/leisaac_env:envs/bi_so101_fold_cloth.py", n_envs=1, trust_remote_code=True)
Note: when working with bi_so101_fold_cloth, call initialize() immediately after retrieving the environment, before performing any other operations:
import torch

from lerobot.envs.factory import make_env

# Load from the hub
envs_dict = make_env("LightwheelAI/leisaac_env:envs/bi_so101_fold_cloth.py", n_envs=1, trust_remote_code=True)

# Access the environment
suite_name = next(iter(envs_dict))
sync_vector_env = envs_dict[suite_name][0]
# Retrieve the Isaac environment from the sync vector env
env = sync_vector_env.envs[0].unwrapped

# NOTE: call initialize() first
env.initialize()

# other operations with env...
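As an illustration of such an operation, a single random-action step (the same stepping API as the random-action example above) confirms the environment is live:
# e.g. one random-action step after initialize()
action = torch.tensor(env.action_space.sample())
obs, reward, terminated, truncated, info = env.step(action)
env.close()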