diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml index 0bdbaaa8e..98b2a668d 100644 --- a/.github/workflows/black.yml +++ b/.github/workflows/black.yml @@ -7,4 +7,4 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - - uses: psf/black@21.12b0 \ No newline at end of file + - uses: psf/black@stable \ No newline at end of file diff --git a/.github/workflows/build-containers.yml b/.github/workflows/build-containers.yml deleted file mode 100644 index 861bee40c..000000000 --- a/.github/workflows/build-containers.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: Build and Push Containers - -on: - release: - types: - - created - workflow_dispatch: - -env: - REGISTRY_USER: igibson - IMAGE_REGISTRY: docker.io - REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }} - -jobs: - push_to_registry: - name: Push images to docker.io - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Log in to docker.io - uses: redhat-actions/podman-login@v1 - with: - username: ${{ env.REGISTRY_USER }} - password: ${{ env.REGISTRY_PASSWORD }} - registry: ${{ env.IMAGE_REGISTRY }} - - - name: install podman-compose - run: pip3 install https://github.com/containers/podman-compose/archive/devel.tar.gz - - - name: Build and push images to quay - run: | - cd docker - podman-compose -f compose-build.yml build - podman-compose -f compose-build.yml push diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml deleted file mode 100644 index 12d4a1c8e..000000000 --- a/.github/workflows/docs.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Build & deploy docs - -on: [push] - -jobs: - build-docs: - name: Build and deploy documentation - runs-on: ubuntu-latest - if: github.repository == 'StanfordVL/iGibson' && github.ref == 'refs/heads/master' - - steps: - - name: Checkout code - uses: actions/checkout@v2 - - - name: Setup python - uses: actions/setup-python@v2 - with: - python-version: "3.8" - architecture: x64 - - - name: Install dev requirements - run: pip install -r requirements-dev.txt - - - name: Generate docs - working-directory: ./docs - run: make html - - - name: Deploy to gh-pages - uses: peaceiris/actions-gh-pages@v3 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: ./docs/_build/html diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 27812afcb..000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Release on PyPI - -on: - release: - types: - - created - -jobs: - build: - runs-on: "ubuntu-latest" - steps: - - uses: actions/checkout@master - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - - name: Install pypa/build - run: >- - python -m - pip install - build - --user - - name: Build a binary wheel and a source tarball - run: >- - python -m - build - --sdist - --outdir dist/ - . 
- - name: Publish a Python distribution to PyPI - if: github.repository == 'StanfordVL/iGibson' && startsWith(github.ref, 'refs/tags') - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/sync-repos.yml b/.github/workflows/sync-repos.yml deleted file mode 100644 index de70e0b3c..000000000 --- a/.github/workflows/sync-repos.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: Sync iGibson-dev/master to iGibson/master - -on: - workflow_dispatch: - schedule: - # Sync repositories on the 1st and 15th of the month at 9 AM - - cron: "0 17 1,15 * *" - -jobs: - sync-private-fork: - runs-on: ubuntu-latest - if: github.ref == 'refs/heads/master' - steps: - - uses: actions/checkout@v2 - - name: Sync iGibson-dev/master to iGibson/master - if: github.repository == 'StanfordVL/iGibson-dev' && startsWith(github.ref, 'refs/tags') - run: - git push https://$PERSONAL_ACCESS_TOKEN:x-oauth-basic@github.com/StanfordVL/iGibson.git master:master diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml deleted file mode 100644 index 41d368318..000000000 --- a/.github/workflows/tests.yml +++ /dev/null @@ -1,85 +0,0 @@ -name: Tests - -on: [pull_request] - -concurrency: - group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }} - cancel-in-progress: true - -jobs: - test: - runs-on: [self-hosted, linux, gpu] - if: github.repository == 'StanfordVL/iGibson-dev' - - steps: - - name: Checkout source - uses: actions/checkout@v2 - with: - submodules: true - path: igibson - - - name: Add CUDA to env - run: echo "/usr/local/cuda/bin" >> $GITHUB_PATH - - - name: Setup python - uses: actions/setup-python@v2 - with: - python-version: "3.8" - architecture: x64 - - - name: Install dev requirements - working-directory: igibson - run: pip install -r requirements-dev.txt - - - name: Install - working-directory: igibson - run: pip install -e . - - - name: Checkout BDDL - uses: actions/checkout@v2 - with: - repository: StanfordVL/bddl-dev - ref: 581be50e7cfd2b3a1447aaa1b4fc2424b673339c - token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} # PAT is required since this is a different repo - path: bddl - submodules: recursive - lfs: true - - - name: Install BDDL - working-directory: bddl - run: pip install -e . - - - name: Link Dataset - working-directory: igibson - run: ln -s /scr/ig-data igibson/data - - # The below method of checking out ig-dataset is currently unused due to poor speed. 
- # - name: Create data directory - # run: mkdir -p igibson/igibson/data - # - # - name: Checkout ig_dataset - # uses: actions/checkout@v2 - # with: - # repository: StanfordVL/ig_dataset - # token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} # PAT is required since this is a different repo - # path: igibson/igibson/data/ig_dataset - # submodules: recursive - # lfs: true - # - # - name: Checkout ig_assets - # uses: actions/checkout@v2 - # with: - # repository: StanfordVL/ig_assets - # token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} # PAT is required since this is a different repo - # path: igibson/igibson/data/assets - # submodules: recursive - # lfs: true - - - name: Run tests - working-directory: igibson - run: pytest - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v2.1.0 - with: - token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.gitignore b/.gitignore index 8efbad282..fe2169e29 100644 --- a/.gitignore +++ b/.gitignore @@ -81,6 +81,8 @@ igibson/utils/data_utils/mesh_decimation/final_videos # libcryptopp igibson/render/mesh_renderer/libcryptopp.so.8.6 -# Coverage -.coverage -coverage.xml +# igibson run json files +*my_igibson_run.json + +# Temp modifiable file. +igibson/examples/configs/*yaml* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c616f2c07..9cea96e6c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/psf/black - rev: 21.12b0 + rev: stable # Replace by any tag/version: https://github.com/psf/black/tags hooks: - id: black language_version: python3 # Should be a command that runs python3.6+ diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 000000000..f57094a94 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,89 @@ +pipeline { + + agent { + docker { + image 'gibsonchallenge/gibsonv2:jenkins2' + args '--runtime=nvidia -v ${WORKSPACE}/../ig_dataset:${WORKSPACE}/igibson/data/ig_dataset' + } + } + + stages { + stage('Build') { + steps { + sh 'nvidia-smi' + sh 'pwd' + sh 'printenv' + sh 'pip install -e .' 
+ sh 'sudo chown -R jenkins ${WORKSPACE}/igibson/data/' + } + } + + stage('Build Docs') { + steps { + sh 'sphinx-apidoc -o docs/apidoc igibson igibson/external igibson/utils/data_utils/' + sh 'sphinx-build -b html docs _sites' + } + } + + stage('Test') { + steps { + sh 'mkdir result' + sh 'pytest igibson/test/test_binding.py --junitxml=test_result/test_binding.py.xml' + sh 'pytest igibson/test/test_render.py --junitxml=test_result/test_render.py.xml' + sh 'pytest igibson/test/test_pbr.py --junitxml=test_result/test_pbr.py.xml' + sh 'pytest igibson/test/test_object.py --junitxml=test_result/test_object.py.xml' + sh 'pytest igibson/test/test_simulator.py --junitxml=test_result/test_simulator.py.xml' + sh 'pytest igibson/test/test_igibson_env.py --junitxml=test_result/test_igibson_env.py.xml' + sh 'pytest igibson/test/test_scene_importing.py --junitxml=test_result/test_scene_importing.py.xml' + sh 'pytest igibson/test/test_robot.py --junitxml=test_result/test_robot.py.xml' + sh 'pytest igibson/test/test_igsdf_scene_importing.py --junitxml=test_result/test_igsdf_scene_importing.py.xml' + sh 'pytest igibson/test/test_sensors.py --junitxml=test_result/test_sensors.py.xml' + sh 'pytest igibson/test/test_motion_planning.py --junitxml=test_result/test_motion_planning.py.xml' + sh 'pytest igibson/test/test_states.py --junitxml=test_result/test_states.py.xml' + sh 'pytest igibson/test/test_determinism_against_same_version.py --junitxml=test_result/test_determinism_against_same_version.py.xml' + } + } + + stage('Benchmark') { + steps { + sh 'python igibson/test/benchmark/benchmark_static_scene.py' + sh 'python igibson/test/benchmark/benchmark_interactive_scene.py' + } + } + + } + post { + always { + junit 'test_result/*.xml' + archiveArtifacts artifacts: 'test_result/*.xml', fingerprint: true + archiveArtifacts artifacts: '*.pdf' + archiveArtifacts artifacts: '*.png' + + publishHTML (target: [ + allowMissing: true, + alwaysLinkToLastBuild: false, + keepAll: true, + reportDir: '_sites', + reportFiles: 'index.html', + includes: '**/*', + reportName: "iGibson docs" + ]) + + cleanWs() + } + failure { + script { + // Send an email only if the build status has changed from green/unstable to red + emailext subject: '$DEFAULT_SUBJECT', + body: '$DEFAULT_CONTENT', + recipientProviders: [ + [$class: 'CulpritsRecipientProvider'], + [$class: 'DevelopersRecipientProvider'], + [$class: 'RequesterRecipientProvider'] + ], + replyTo: '$DEFAULT_REPLYTO', + to: '$DEFAULT_RECIPIENTS' + } + } + } +} diff --git a/README.md b/README.md index cf99216f7..2a0fb6eec 100644 --- a/README.md +++ b/README.md @@ -27,24 +27,24 @@ iGibson is a simulation environment providing fast visual rendering and physics If you use iGibson or its assets and models, consider citing the following publication: ``` -@misc{li2021igibson, - title={iGibson 2.0: Object-Centric Simulation for Robot Learning of Everyday Household Tasks}, - author={Chengshu Li and Fei Xia and Roberto Mart\'in-Mart\'in and Michael Lingelbach and Sanjana Srivastava and Bokui Shen and Kent Vainio and Cem Gokmen and Gokul Dharan and Tanish Jain and Andrey Kurenkov and Karen Liu and Hyowon Gweon and Jiajun Wu and Li Fei-Fei and Silvio Savarese}, +@misc{shen2021igibson, + title={iGibson 1.0: a Simulation Environment for Interactive Tasks in Large Realistic Scenes}, + author={Bokui Shen and Fei Xia and Chengshu Li and Roberto Martín-Martín and Linxi Fan and Guanzhi Wang and Claudia Pérez-D'Arpino and Shyamal Buch and Sanjana Srivastava and Lyne P. Tchapmi and Micael E. 
Tchapmi and Kent Vainio and Josiah Wong and Li Fei-Fei and Silvio Savarese}, year={2021}, - eprint={2108.03272}, + eprint={2012.02924}, archivePrefix={arXiv}, - primaryClass={cs.RO} + primaryClass={cs.AI} } ``` ``` -@inproceedings{shen2021igibson, - title={iGibson 1.0: a Simulation Environment for Interactive Tasks in Large Realistic Scenes}, - author={Bokui Shen and Fei Xia and Chengshu Li and Roberto Mart\'in-Mart\'in and Linxi Fan and Guanzhi Wang and Claudia P\'erez-D'Arpino and Shyamal Buch and Sanjana Srivastava and Lyne P. Tchapmi and Micael E. Tchapmi and Kent Vainio and Josiah Wong and Li Fei-Fei and Silvio Savarese}, - booktitle={2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, +@misc{li2021igibson, + title={iGibson 2.0: Object-Centric Simulation for Robot Learning of Everyday Household Tasks}, + author={Chengshu Li and Fei Xia and Roberto Martín-Martín and Michael Lingelbach and Sanjana Srivastava and Bokui Shen and Kent Vainio and Cem Gokmen and Gokul Dharan and Tanish Jain and Andrey Kurenkov and Karen Liu and Hyowon Gweon and Jiajun Wu and Li Fei-Fei and Silvio Savarese}, year={2021}, - pages={accepted}, - organization={IEEE} + eprint={2108.03272}, + archivePrefix={arXiv}, + primaryClass={cs.RO} } ``` @@ -57,9 +57,9 @@ If you want to know more about iGibson, you can also check out [our webpage](htt For instructions to install iGibson and download dataset, you can visit [installation guide](http://svl.stanford.edu/igibson/docs/installation.html) and [dataset download guide](http://svl.stanford.edu/igibson/docs/dataset.html). -There are other datasets we link to iGibson. We include support to use CubiCasa5K and 3DFront scenes, adding up more than 10000 extra interactive scenes to use in iGibson! Check our [documentation](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils/ext_scene) on how to use those. +There are other datasets we link to iGibson. We include support to use CubiCasa5K and 3DFront scenes, adding up more than 8000 extra interactive scenes to use in iGibson! Check our documentation on how to use those. -We also maintain compatibility with datasets of 3D reconstructed large real-world scenes (homes and offices) that you can download and use with iGibson. For Gibson Dataset and Stanford 2D-3D-Semantics Dataset, please fill out this [form](https://forms.gle/36TW9uVpjrE1Mkf9A). For Matterport3D Dataset, please fill in this [form](http://dovahkiin.stanford.edu/matterport/public/MP_TOS.pdf) and send it to [matterport3d@googlegroups.com](mailto:matterport3d@googlegroups.com). Please put "use with iGibson simulator" in your email. Check our [dataset download guide](http://svl.stanford.edu/igibson/docs/dataset.html) for more details. +We also maintain compatibility with datasets of 3D reconstructed large real-world scenes (homes and offices) that you can download and use with iGibson, for example from our previous simulator, Gibson. All of them will be accessible once you fill in this [form]. ### Using iGibson with VR If you want to use iGibson VR interface, please visit the [VR guide (TBA)]. 
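For reference only (this note and sketch are not part of the patch): the Jenkins `Test` stage added above runs each pytest module with a JUnit XML report. A minimal local approximation could look like the sketch below, assuming iGibson has been installed in editable mode and `ig_dataset` is available under `igibson/data` the way the pipeline's Docker volume mount provides it. The test module paths are taken verbatim from the Jenkinsfile; everything else is an assumption.

```bash
# Hypothetical local reproduction of part of the Jenkins "Test" stage (not from the patch).
# Assumes: `pip install -e .` has been run and igibson/data/ig_dataset exists.
mkdir -p test_result
pytest igibson/test/test_binding.py --junitxml=test_result/test_binding.py.xml
pytest igibson/test/test_render.py --junitxml=test_result/test_render.py.xml
pytest igibson/test/test_simulator.py --junitxml=test_result/test_simulator.py.xml
```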
diff --git a/docker/.env b/docker/.env deleted file mode 100644 index d98efa377..000000000 --- a/docker/.env +++ /dev/null @@ -1,3 +0,0 @@ -REGISTRY=docker.io -REPO=igibson -VERSION=v2.0.4 diff --git a/docker/tensorflow/Dockerfile b/docker/base/Dockerfile similarity index 100% rename from docker/tensorflow/Dockerfile rename to docker/base/Dockerfile diff --git a/docker/tensorflow/build.sh b/docker/base/build.sh similarity index 100% rename from docker/tensorflow/build.sh rename to docker/base/build.sh diff --git a/docker/tensorflow/run.sh b/docker/base/run.sh similarity index 100% rename from docker/tensorflow/run.sh rename to docker/base/run.sh diff --git a/docker/behavior/Dockerfile b/docker/behavior/Dockerfile index 39668af25..33201887f 100644 --- a/docker/behavior/Dockerfile +++ b/docker/behavior/Dockerfile @@ -1,8 +1,38 @@ -ARG REGISTRY=docker.io -ARG REPO=igibson -ARG VERSION=v2.0.4 +FROM nvidia/cudagl:11.1.1-devel-ubuntu20.04 -FROM $REGISTRY/$REPO:$VERSION +ARG DEBIAN_FRONTEND=noninteractive -RUN git clone --depth 1 --branch master https://github.com/StanfordVL/bddl /opt/bddl --recursive -RUN pip install --no-cache-dir -e /opt/bddl +RUN apt-get update && apt-get install -y --no-install-recommends \ + cmake \ + git \ + vim \ + wget \ + curl \ + python3-dev \ + python3-opencv \ + python3-pip && \ + rm -rf /var/lib/apt/lists/* + +RUN curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh +RUN bash Miniconda-latest-Linux-x86_64.sh -p /miniconda -b +RUN rm Miniconda-latest-Linux-x86_64.sh +ENV PATH=/miniconda/bin:${PATH} +RUN conda update -y conda +RUN conda create -y -n igibson python=3.8.0 + +ENV PATH /miniconda/envs/igibson/bin:$PATH + +RUN pip install torch pytest ray[default,rllib] stable-baselines3 + +RUN git clone --branch ig-develop https://github.com/StanfordVL/iGibson /opt/iGibson --recursive +WORKDIR /opt/iGibson +RUN pip install -e . + +RUN git clone --branch master https://github.com/StanfordVL/BDDL /opt/BDDL --recursive +WORKDIR /opt/BDDL +RUN pip install -e . 
+ +RUN python3 -m igibson.utils.assets_utils --download_assets +RUN python3 -m igibson.utils.assets_utils --download_demo_data + +WORKDIR /opt/iGibson/igibson diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml deleted file mode 100644 index 512092dcf..000000000 --- a/docker/docker-compose.yml +++ /dev/null @@ -1,25 +0,0 @@ -version: "3.7" - -services: - igibson: - image: ${REGISTRY}/${REPO}:${VERSION} - build: - context: ./igibson - dockerfile: Dockerfile - - igibson-tensorflow: - image: ${REGISTRY}/${REPO}:${VERSION}-tensorflow - build: - context: ./tensorflow - dockerfile: Dockerfile - - behavior: - image: ${REGISTRY}/${REPO}:${VERSION}-behavior - build: - context: ./behavior - dockerfile: Dockerfile - args: - - VERSION=${VERSION} - - REGISTRY=${REGISTRY} - - REPO=${REPO} - diff --git a/docker/headless-gui/Dockerfile b/docker/headless-gui/Dockerfile new file mode 100644 index 000000000..9e894a7d0 --- /dev/null +++ b/docker/headless-gui/Dockerfile @@ -0,0 +1,25 @@ +FROM igibson/igibson:latest + +# add dummy display and remote GUI via x11VNC + +RUN DEBIAN_FRONTEND=noninteractive apt install -y \ + xserver-xorg-video-dummy \ + xfce4 desktop-base \ + x11vnc net-tools +# disable screensaver +RUN apt autoremove -y xscreensaver + +# optional: if you want a richer desktop experience +RUN DEBIAN_FRONTEND=noninteractive apt install -y \ + xfce4-terminal firefox +RUN echo 2 | update-alternatives --config x-terminal-emulator +# ==== end of optional ===== + +RUN mkdir -p /opt/misc /opt/logs +COPY x-dummy.conf /opt/misc +COPY entrypoint.sh /opt/misc + +ENV QT_X11_NO_MITSHM=1 +ENV DISPLAY=:0 +WORKDIR /opt/igibson/igibson/examples/demo +ENTRYPOINT ["/opt/misc/entrypoint.sh"] diff --git a/docker/headless-gui/build.sh b/docker/headless-gui/build.sh new file mode 100755 index 000000000..cf79d7589 --- /dev/null +++ b/docker/headless-gui/build.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +IMAGE=igibson/igibson-gui + +export DOCKER_BUILDKIT=1 +docker build -t $IMAGE:v1.0.1 . \ + && docker tag $IMAGE:v1.0.1 $IMAGE:latest \ + && echo BUILD SUCCESSFUL diff --git a/docker/headless-gui/entrypoint.sh b/docker/headless-gui/entrypoint.sh new file mode 100755 index 000000000..a8ef16a4e --- /dev/null +++ b/docker/headless-gui/entrypoint.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +VNC_PASSWORD=${VNC_PASSWORD:-112358} + +# start X server with dummy display on :0 +X -config /opt/misc/x-dummy.conf > /opt/logs/x-dummy.log 2>&1 & + +sleep 2 + +# start xcfe desktop +startxfce4 > /opt/logs/xcfe4.log 2>&1 & + +# start x11VNC server. 
Must publish port 5900 at `docker run` +x11vnc -display :0 -noxrecord -noxfixes -noxdamage -forever -passwd $VNC_PASSWORD > /opt/logs/x11vnc.log 2>&1 & + +"$@" diff --git a/docker/headless-gui/run.sh b/docker/headless-gui/run.sh new file mode 100755 index 000000000..7957845bc --- /dev/null +++ b/docker/headless-gui/run.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +VNC_PORT=5900 +VNC_PASSWORD=112358 + +PARAMS= +while (( "$#" )); do + case "$1" in + -p|--vnc-port) + VNC_PORT=$2 + shift 2 + ;; + -pw|--vnc-password) + VNC_PASSWORD=$2 + shift 2 + ;; + --) # end argument parsing + shift + break + ;; + -*|--*=) # unsupported flags + echo "Error: Unsupported flag $1" >&2 + exit 1 + ;; + *) # preserve positional arguments + PARAMS="$PARAMS $1" + shift + ;; + esac +done + +echo Starting VNC server on port $VNC_PORT with password $VNC_PASSWORD +echo please run \"python simulator_example.py\" once you see the docker command prompt: +docker run --gpus all -ti -p $VNC_PORT:5900 -e VNC_PASSWORD=$VNC_PASSWORD --rm igibson/igibson-gui:latest bash \ No newline at end of file diff --git a/docker/headless-gui/x-dummy.conf b/docker/headless-gui/x-dummy.conf new file mode 100644 index 000000000..de4648bef --- /dev/null +++ b/docker/headless-gui/x-dummy.conf @@ -0,0 +1,25 @@ +Section "Monitor" + Identifier "Monitor0" + HorizSync 28.0-80.0 + VertRefresh 48.0-75.0 + # https://arachnoid.com/modelines/ + # 1920x1080 @ 60.00 Hz (GTF) hsync: 67.08 kHz; pclk: 172.80 MHz + Modeline "1920x1080_60.00" 172.80 1920 2040 2248 2576 1080 1081 1084 1118 -HSync +Vsync +EndSection + +Section "Device" + Identifier "Card0" + Driver "dummy" + VideoRam 256000 +EndSection + +Section "Screen" + DefaultDepth 24 + Identifier "Screen0" + Device "Card0" + Monitor "Monitor0" + SubSection "Display" + Depth 24 + Modes "1920x1080_60.00" + EndSubSection +EndSection \ No newline at end of file diff --git a/docker/igibson/Dockerfile b/docker/igibson/Dockerfile deleted file mode 100644 index cbf6e52b7..000000000 --- a/docker/igibson/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -FROM nvidia/cudagl:11.3.1-base-ubuntu20.04 - -ARG DEBIAN_FRONTEND=noninteractive - -RUN apt-get update && apt-get install -y --no-install-recommends \ - cmake \ - curl \ - g++ \ - git \ - make \ - vim \ - wget \ - cuda-command-line-tools-11-3 && \ - rm -rf /var/lib/apt/lists/* - -RUN curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -RUN bash Miniconda-latest-Linux-x86_64.sh -p /miniconda -b -RUN rm Miniconda-latest-Linux-x86_64.sh -ENV PATH=/miniconda/bin:${PATH} - -RUN conda update -y conda -RUN conda create -y -n igibson python=3.8.0 - -ENV PATH /miniconda/envs/igibson/bin:$PATH - -# NOTE: This needs to be updated in-step with the base cudagl image so the tensor renderer works -RUN pip install torch==1.10.0+cu113 torchvision==0.11.1+cu113 torchaudio==0.10.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html --no-cache-dir - -RUN git clone --depth 1 https://github.com/StanfordVL/iGibson /opt/iGibson --recursive && \ - rm -rf /opt/iGibson/igibson/render/openvr/samples - -RUN pip install --no-cache-dir -e . 
- -RUN pip install --no-cache-dir pytest ray[default,rllib] stable-baselines3 && rm -rf /root/.cache - -WORKDIR /opt/iGibson - -RUN python3 -m igibson.utils.assets_utils --download_assets && rm -rf /tmp/* -RUN python3 -m igibson.utils.assets_utils --download_demo_data && rm -rf /tmp/* - -WORKDIR /opt/igibson/igibson/examples/demo diff --git a/docker/igibson2/Dockerfile b/docker/igibson2/Dockerfile new file mode 100644 index 000000000..82b66c33c --- /dev/null +++ b/docker/igibson2/Dockerfile @@ -0,0 +1,34 @@ +FROM nvidia/cudagl:11.1.1-devel-ubuntu20.04 + +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get install -y --no-install-recommends \ + cmake \ + git \ + wget \ + curl \ + python3-dev \ + python3-opencv \ + python3-pip && \ + rm -rf /var/lib/apt/lists/* + +RUN curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh +RUN bash Miniconda-latest-Linux-x86_64.sh -p /miniconda -b +RUN rm Miniconda-latest-Linux-x86_64.sh +ENV PATH=/miniconda/bin:${PATH} +RUN conda update -y conda +RUN conda create -y -n igibson python=3.8.0 + +ENV PATH /miniconda/envs/igibson/bin:$PATH + +RUN pip install --no-cache-dir https://github.com/StanfordVL/bullet3/archive/master.zip +RUN pip install torch pytest ray[default,rllib] stable-baselines3 + +RUN git clone --branch master https://github.com/StanfordVL/iGibson /opt/igibson --recursive +WORKDIR /opt/igibson +RUN pip install -e . + +RUN python3 -m igibson.utils.assets_utils --download_assets +RUN python3 -m igibson.utils.assets_utils --download_demo_data + +WORKDIR /opt/igibson/igibson/examples/demo diff --git a/docker/igibson/build.sh b/docker/igibson2/build.sh similarity index 100% rename from docker/igibson/build.sh rename to docker/igibson2/build.sh diff --git a/docker/igibson/run.sh b/docker/igibson2/run.sh similarity index 100% rename from docker/igibson/run.sh rename to docker/igibson2/run.sh diff --git a/docker/jenkins/Dockerfile b/docker/jenkins/Dockerfile new file mode 100644 index 000000000..d63f5e1de --- /dev/null +++ b/docker/jenkins/Dockerfile @@ -0,0 +1,54 @@ +from nvidia/cudagl:10.0-base-ubuntu18.04 + +ARG CUDA=10.0 +ARG CUDNN=7.6.2.24-1 + +RUN apt-get update && \ + apt-get -y install sudo + +ARG USER_ID +ARG GROUP_ID + +RUN addgroup --gid $GROUP_ID jenkins +RUN adduser --disabled-password --gecos '' --uid $USER_ID --gid $GROUP_ID jenkins + +ENV user jenkins + +RUN chown -R ${user} /home/${user} && \ + adduser ${user} sudo && \ + echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers + + +USER ${user} +WORKDIR /home/${user} + +RUN sudo apt-get install -y --no-install-recommends \ + curl build-essential git cmake \ + cuda-command-line-tools-10-0 \ + cuda-cublas-10-0 \ + cuda-cufft-10-0 \ + cuda-curand-10-0 \ + cuda-cusolver-10-0 \ + cuda-cusparse-10-0 \ + libcudnn7=${CUDNN}+cuda${CUDA} \ + vim \ + tmux \ + libhdf5-dev \ + libsm6 \ + libxext6 \ + libxrender-dev \ + wget + +# Install miniconda to /miniconda +RUN curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh +RUN bash Miniconda-latest-Linux-x86_64.sh -p /home/${user}/miniconda -b +RUN rm Miniconda-latest-Linux-x86_64.sh +ENV PATH=/home/${user}/miniconda/bin:${PATH} +RUN conda update -y conda +RUN conda create -y -n py3-igibson python=3.6.8 +# Python packages from conda + +ENV PATH /home/${user}/miniconda/envs/py3-igibson/bin:$PATH + +RUN pip install pytest +RUN pip install Cython==0.21.1 \ No newline at end of file diff --git a/docker/jenkins/build.sh b/docker/jenkins/build.sh new file mode 100644 index 000000000..03207a8a6 --- 
/dev/null +++ b/docker/jenkins/build.sh @@ -0,0 +1 @@ +docker build -t gibsonchallenge/gibsonv2:jenkins2 --build-arg USER_ID=$(id -u jenkins) --build-arg GROUP_ID=$(id -g jenkins) . diff --git a/docker/pull-images.sh b/docker/pull-images.sh new file mode 100755 index 000000000..d6fc273d0 --- /dev/null +++ b/docker/pull-images.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +VERSION=1.0.1 + +docker pull igibson/igibson:v$VERSION +docker pull igibson/igibson:latest +docker pull igibson/igibson-gui:v$VERSION +docker pull igibson/igibson-gui:latest diff --git a/docker/push-images.sh b/docker/push-images.sh new file mode 100755 index 000000000..f7dfda01f --- /dev/null +++ b/docker/push-images.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +VERSION=1.0.1 + +docker push igibson/igibson:v$VERSION +docker push igibson/igibson:latest +docker push igibson/igibson-gui:v$VERSION +docker push igibson/igibson-gui:latest diff --git a/docs/assets.md b/docs/assets.md index abe3cba49..573127d3f 100644 --- a/docs/assets.md +++ b/docs/assets.md @@ -69,3 +69,7 @@ We also include [YCB objects](http://www.ycbbenchmarks.com/object-models/) in `y ## Test meshes In `test` folder we save a mesh file to test the renderer is compiled correctly, it is used by `test/test_render.py`. + +## Example configs + +Sample config files to be used with the demo. \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index ed8f39c42..7c1bbc072 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -23,7 +23,6 @@ import sphinx_rtd_theme from recommonmark.parser import CommonMarkParser -sys.path.append(os.path.abspath("../")) import igibson project = "iGibson" @@ -51,7 +50,6 @@ "sphinx_markdown_tables", "sphinx.ext.autodoc", "sphinx.ext.mathjax", - "myst_parser", ] html_theme = "sphinx_rtd_theme" diff --git a/docs/dataset.md b/docs/dataset.md index b8a9c30df..50cc050d8 100644 --- a/docs/dataset.md +++ b/docs/dataset.md @@ -1,35 +1,39 @@ -# Datasets +Dataset +========================================== -In this page you will find information about: +In dataset we include two parts. First we introduce the new iGibson dataset in this release. Secondly, we introduce + how to download previous Gibson dataset, which is updated and compatible with iGibson. -- [How to download iGibson 2.0 scenes and the BEHAVIOR Dataset of Objects](#download-igibson-2-0-scenes-and-the-behavior-dataset-of-objects) -- [How to download iGibson 1.0 scenes](#download-igibson-1-0-scenes) -- [How to download Gibson and Stanford 2D-3D-Semantics scenes](#download-gibson-and-stanford-2d-3d-semantics-scenes) -- [How to download Matterport3D scenes](#download-matterport3d-scenes) +- [Download iGibson 2.0 Scenes and Behavior Dataset of Objects](#download-igibson-2.0-scenes-and-behavior-dataset-of-objects) +- [Download iGibson 1.0 Scenes](#download-igibson-1.0-scenes) +- [Download Gibson Scenes](#download-gibson-scenes) -## Download iGibson 2.0 Scenes and the BEHAVIOR Dataset of Objects +Download iGibson 2.0 Scenes and Behavior Dataset of Objects +------------------------- -What will you download? -- **iGibson 2.0 Dataset of Scenes**: New versions of the fully interactive scenes, more densely populated with objects. -- **BEHAVIOR Object Dataset**: Dataset of object models annotated with physical and semantic properties. The 3D models are free to use within iGibson 2.0 for BEHAVIOR (due to artists' copyright, models are encrypted and allowed only to be used with iGibson 2.0). 
You can download a bundle of the iGibson 2.0 dataset of scenes and the BEHAVIOR dataset of objects here. +- iGibson 2.0 Dataset of Scenes: New versions of the fully interactive scenes, more densely populated with objects. +- BEHAVIOR Object Dataset: Dataset of object models annotated with physical and semantic properties. The 3D models are free to use within iGibson 2.0 for BEHAVIOR (due to artists' copyright, models are encrypted and allowed only to be used with iGibson 2.0). You can download a bundle of the iGibson 2.0 dataset of scenes and the BEHAVIOR dataset of objects here. To download both in a bundle, you need to follow the following steps: -- Fill out the license agreement in this [form](https://docs.google.com/forms/d/e/1FAIpQLScPwhlUcHu_mwBqq5kQzT2VRIRwg_rJvF0IWYBk_LxEZiJIFg/viewform) -- You will receive a key (igibson.key). Move it into the subfolder of the iGibson repository that contains the dataset, for example, iGibson/igibson/data -- Download the behavior data bundle (ig_dataset) from [here](https://storage.googleapis.com/gibson_scenes/behavior_data_bundle.zip) -- Unzip ig_dataset into the folder: `unzip behavior_data_bundle.zip -d iGibson/igibson/data` +Request access to the BEHAVIOR assets using this [form](https://docs.google.com/forms/d/e/1FAIpQLScPwhlUcHu_mwBqq5kQzT2VRIRwg_rJvF0IWYBk_LxEZiJIFg/viewform): +- Fill out the license agreement as suggested in the [form](https://docs.google.com/forms/d/e/1FAIpQLScPwhlUcHu_mwBqq5kQzT2VRIRwg_rJvF0IWYBk_LxEZiJIFg/viewform) +- When done, copy the key you receive (igibson.key) into the repository subfolder folder iGibson/igibson/data +- Download the behavior data bundle (ig_dataset) [here](https://storage.googleapis.com/gibson_scenes/behavior_data_bundle.zip). +- Unzip ig_dataset: + - `mkdir iGibson/igibson/data` + - `unzip behavior_data_bundle.zip -d iGibson/igibson/data` -After this process, you will be able to sample and use the scenes and objects in iGibson, for example, to evaluate your embodied AI solutions in the [BEHAVIOR benchmark](https://behavior.stanford.edu/). -## Download iGibson 1.0 Scenes +Download iGibson 1.0 Scenes +------------------------ -What will you download? -- **iGibson 1.0 Dataset of Scenes**: We annotated fifteen 3D reconstructions of real-world scans and converted them into fully interactive scene models. In this process, we respect the original object-instance layout and object-category distribution. The object models are extended from open-source datasets ([ShapeNet Dataset](https://www.shapenet.org/), [Motion Dataset](http://motiondataset.zbuaa.com/), [SAPIEN Dataset](https://sapien.ucsd.edu/)) enriched with annotations of material and dynamic properties. +We annotate fifteen 3D reconstructions of real-world scans and convert them into fully interactive scene models. In this process, we respect the original object-instance layout and object-category distribution. The object models are extended from open-source datasets ([ShapeNet Dataset](https://www.shapenet.org/), [Motion Dataset](http://motiondataset.zbuaa.com/), [SAPIEN Dataset](https://sapien.ucsd.edu/)) enriched with annotations of material and dynamic properties. -The following image shows the fifteen fully interactive scenes: +The fifteen fully interactive models are visualized below. ![placeholder.jpg](images/ig_scene.png) +#### Download Instruction To download the dataset, you need to first configure where the dataset is to be stored. 
You can change it in `your_installation_path/igibson/global_config.yaml` (default and recommended: `ig_dataset: your_installation_path/igibson/data/ig_dataset`). iGibson scenes can be downloaded with one single line: ```bash @@ -37,38 +41,54 @@ python -m igibson.utils.assets_utils --download_ig_dataset ``` If the script fails to work, you can download from this [direct link](https://storage.googleapis.com/gibson_scenes/ig_dataset.tar.gz) and extract to `your_installation_path/igibson/data/ig_dataset`. +#### Dataset Format +The new dataset format can be found [here](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils). -A description of the file structure and format of the files in the dataset can be found [here](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils). +#### Cubicasa / 3D Front Dataset +We provide support for Cubicasa and 3D Front Dataset, to import them into iGibson, follow the guide [here](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils/ext_scene). -**Cubicasa / 3D Front Dataset Support:** We provide support for Cubicasa and 3D Front Dataset providing more than 10000 additional scenes (with less furniture than our fifteen scenes). To import them into iGibson, follow the instructions [here](https://github.com/StanfordVL/iGibson/tree/master/igibson/utils/data_utils/ext_scene). +Download Gibson Scenes +------------------------ +Original Gibson Environment Dataset has been updated to use with iGibson simulator. The link will first take you to + the license agreement and then to the data. -## Download Gibson and Stanford 2D-3D-Semantics scenes +[[ Get download link for Gibson Data ]]. -What will you download? -- **Gibson static scenes**: more than 500 reconstructions of homes and offices with a Matterport device. These models keep the texture observed with the sensor, but contain some irregularities, specially with reflective surfaces and thin elements like chairs' legs. -- **Stanford 2D-3D-Semantics scenes**: 7 reconstructions of Stanford offices. +License Note: The dataset license is included in the above link. The license in this repository covers only the provided software. -Files included in the dataset: +Files included in this distribution: -- All scenes, 572 scenes (108GB): gibson_v2_all.tar.gz -- 4+ partition, 106 scenes, with textures better packed (2.6GB): gibson_v2_4+.tar.gz -- Stanford 2D-3D-Semantics, 7 scenes (1.4GB): 2d3ds_for_igibson.zip +1. All scenes, 572 scenes (108GB): gibson_v2_all.tar.gz +2. 4+ partition, 106 scenes, with textures better packed (2.6GB): gibson_v2_4+.tar.gz +3. Demo scene `Rs` -We have updated these datasets to be used with iGibson so that users can keep developing and studying pure navigation solutions. The following link will bring you to a license agreement and then to a downloading URL: [form](https://forms.gle/36TW9uVpjrE1Mkf9A) - -After filling in the agreement, you will obtain a downloading `URL`. -You can download the data manually and store it in the path set in `your_installation_path/igibson/global_config.yaml` (default and recommended: `g_dataset: your_installation_path/igibson/data/g_dataset`). 
-Alternatively, you can run a single command to download the dataset, decompress, and place it in the correct folder: +To download 1 and 2, you need to fill in the agreement and get the download link `URL`, after which you can + manually download and store them in the path set in `your_installation_path/igibson/global_config.yaml` (default and + recommended: `dataset: your_installation_path/igibson/data/g_dataset`). You can run a single command to download the dataset + , this script automatically download, decompress, and put the dataset to correct place. ```bash python -m igibson.utils.assets_utils --download_dataset URL ``` -The Gibson Environment Dataset consists of 572 models and 1440 floors. We cover a diverse set of models including households, offices, hotels, venues, museums, hospitals, construction sites, etc. A diverse set of visualization of all spaces in Gibson can be seen [here](http://gibsonenv.stanford.edu/database/). -The following image shows some of the environments: +To download 3, you can run: + +```bash +python -m igibson.utils.assets_utils --download_demo_data +``` + + +### Original Gibson Environment Dataset Description (Non-interactive) + + +Full Gibson Environment Dataset consists of 572 models and 1440 floors. We cover a diverse set of models including households, offices, hotels, venues, museums, hospitals, construction sites, etc. A diverse set of visualization of all spaces in Gibson can be seen [here](http://gibsonenv.stanford.edu/database/). + ![spaces.png](images/spaces.png) -**Gibson Dataset Metadata:** Each space in the database has some metadata with the following attributes associated with it. The metadata is available in this [JSON file](https://raw.githubusercontent.com/StanfordVL/GibsonEnv/master/gibson/data/data.json). + +#### Dataset Metadata + +Each space in the database has some metadata with the following attributes associated with it. The metadata is available in this [JSON file](https://raw.githubusercontent.com/StanfordVL/GibsonEnv/master/gibson/data/data.json). ``` id # the name of the space, e.g. ""Albertville"" area # total metric area of the building, e.g. "266.125" sq. meters @@ -81,13 +101,10 @@ split_full+ # if the space is in train/val/test/none split of Full+ split_medium # if the space is in train/val/test/none split of Medium partition split_tiny # if the space is in train/val/test/none split of Tiny partition ``` -- Floor Number: Total number of floors in each model. We calculate floor numbers using distinctive camera locations. We use `sklearn.cluster.DBSCAN` to cluster these locations by height and set minimum cluster size to `5`. This means areas with at least `5` sweeps are treated as one single floor. This helps us capture small building spaces such as backyard, attics, basements. -- Area: Total floor area of each model. We calculate total floor area by summing up area of each floor. This is done by sampling point cloud locations based on floor height, and fitting a `scipy.spatial.ConvexHull` on sample locations. -- SSA: Specific surface area. The ratio of inner mesh surface and volume of convex hull of the mesh. This is a measure of clutter in the models: if the inner space is placed with large number of furnitures, objects, etc, the model will have high SSA. -- Navigation Complexity: The highest complexity of navigating between arbitrary points within the model. We sample arbitrary point pairs inside the model, and calculate `A∗` navigation distance between them. 
`Navigation Complexity` is equal to `A*` distance divide by `straight line distance` between the two points. We compute the highest navigation complexity for every model. Note that all point pairs are sample within the *same floor*. -- Subjective Attributes: We examine each model manually, and note the subjective attributes of them. This includes their furnishing style, house shapes, whether they have long stairs, etc. -**Gibson Dataset Format:** Each space in the database has its own folder. All the modalities and metadata for each space are contained in that folder. +#### Dataset Format + +Each space in the database has its own folder. All the modalities and metadata for each space are contained in that folder. ``` mesh_z_up.obj # 3d mesh of the environment, it is also associated with an mtl file and a texture file, omitted here floors.txt # floor height @@ -98,13 +115,26 @@ floor_trav_{}.png # top down views of traversable areas for each floor For the maps, each pixel represents 0.01m, and the center of the image correspond to `(0,0)` in the mesh, as well as in the pybullet coordinate system. +#### Dataset Metrics + + +**Floor Number** Total number of floors in each model. + +We calculate floor numbers using distinctive camera locations. We use `sklearn.cluster.DBSCAN` to cluster these locations by height and set minimum cluster size to `5`. This means areas with at least `5` sweeps are treated as one single floor. This helps us capture small building spaces such as backyard, attics, basements. + +**Area** Total floor area of each model. + +We calculate total floor area by summing up area of each floor. This is done by sampling point cloud locations based on floor height, and fitting a `scipy.spatial.ConvexHull` on sample locations. + +**SSA** Specific surface area. + +The ratio of inner mesh surface and volume of convex hull of the mesh. This is a measure of clutter in the models: if the inner space is placed with large number of furnitures, objects, etc, the model will have high SSA. + +**Navigation Complexity** The highest complexity of navigating between arbitrary points within the model. -## Download Matterport3D Scenes -What will you download? -- Matterport3D Dataset: 90 scenes (3.2GB) +We sample arbitrary point pairs inside the model, and calculate `A∗` navigation distance between them. `Navigation Complexity` is equal to `A*` distance divide by `straight line distance` between the two points. We compute the highest navigation complexity for every model. Note that all point pairs are sample within the *same floor*. -Please fill in this [form](http://dovahkiin.stanford.edu/matterport/public/MP_TOS.pdf) and send it to [matterport3d@googlegroups.com](mailto:matterport3d@googlegroups.com). Please put "use with iGibson simulator" in your email. +**Subjective Attributes** -You'll then recieve a python script via email in response. Run `python download_mp.py --task_data igibson -o .` with the received script to download the data (3.2GB). Afterwards, move each of the scenes to the path set in `your_installation_path/igibson/global_config.yaml` (default and recommended: `g_dataset: your_installation_path/igibson/data/g_dataset`). +We examine each model manually, and note the subjective attributes of them. This includes their furnishing style, house shapes, whether they have long stairs, etc. -Reference: [Matterport3D webpage](https://niessner.github.io/Matterport/). 
\ No newline at end of file diff --git a/docs/environments.md b/docs/environments.md index bdf6c9d6a..d9bfd6f01 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -52,9 +52,9 @@ We provide a few common termination conditions for robotics tasks. Most of the code can be found in [igibson/termination_conditions](https://github.com/StanfordVL/iGibson/tree/master/igibson/termination_conditions). #### Configs -To instantiate an **Environment**, we first need to create a YAML config file. It will specify parameters for the **Environment** (e.g. robot type, action frequency, etc), the **Sensors** (e.g. sensor types, image resolution, noise rate, etc), the **Task** (e.g. task type, goal distance range, etc), the **Reward Functions** (e.g. reward types, reward scale, etc) and the **Termination Conditions** (e.g. goal convergence threshold, time limit, etc). Exapmles of config files can be found here: [configs](https://github.com/StanfordVL/iGibson/tree/master/igibson/configs). +To instantiate an **Environment**, we first need to create a YAML config file. It will specify parameters for the **Environment** (e.g. robot type, action frequency, etc), the **Sensors** (e.g. sensor types, image resolution, noise rate, etc), the **Task** (e.g. task type, goal distance range, etc), the **Reward Functions** (e.g. reward types, reward scale, etc) and the **Termination Conditions** (e.g. goal convergence threshold, time limit, etc). Exapmles of config files can be found here: [examples/configs](https://github.com/StanfordVL/iGibson/tree/master/examples/configs). -Here is one example: [configs/turtlebot_nav.yaml](https://github.com/StanfordVL/iGibson/blob/master/igibson/configs/turtlebot_nav.yaml) +Here is one example: [examples/configs/turtlebot_point_nav.yaml](https://github.com/StanfordVL/iGibson/blob/master/examples/configs/turtlebot_point_nav.yaml) ```yaml # scene @@ -130,7 +130,8 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false ``` @@ -182,8 +183,8 @@ Parameters of this config file are explained below: | laser_link_name | scan_link | the link name of the LiDAR sensor in the robot URDF file | | depth_noise_rate | 0.0 | noise rate for the depth camera. 0.1 means 10% of the pixels will be corrupted (set to 0.0) | | scan_noise_rate | 0.0 | noise rate for the LiDAR. 0.1 means 10% of the rays will be corrupted (set to laser_linear_range) | -| visible_target | true | whether to show visual markers for the target positions that are visible to the agent | -| visible_path | true | whether to show visual markers for the computed shortest path that are visible to the agent | +| visual_object_at_initial_target_pos | true | whether to show visual markers for the initial and target positions | +| target_visual_object_visible_to_agent | false | whether these visual markers are visible to the agents | ### Examples @@ -193,48 +194,29 @@ In this example, we show how to instantiate `iGibsonEnv` and how to step through - `reward`: a scalar that represents the current reward - `done`: a boolean that indicates whether the episode should terminate - `info`: a python dictionary for bookkeeping purpose -The code can be found here: [igibson/examples/environments/env_nonint_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/environments/env_nonint_example.py). 
- -If the execution fails with segfault 11, you may need to reduce texture scaling in the config file (igibson/configs/turtlebot_static_nav.yaml) to avoid out-of-memory error. +The code can be found here: [igibson/examples/demo/env_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/env_example.py). ```python -import logging -import os -from sys import platform - -import yaml - -import igibson from igibson.envs.igibson_env import iGibsonEnv +from time import time +import igibson +import os from igibson.render.profiler import Profiler -from igibson.utils.assets_utils import download_assets, download_demo_data +import logging def main(): - """ - Creates an iGibson environment from a config file with a turtlebot in Rs (not interactive). - It steps the environment 100 times with random actions sampled from the action space, - using the Gym interface, resetting it 10 times. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - # If they have not been downloaded before, download assets and Rs Gibson (non-interactive) models - download_assets() - download_demo_data() - config_filename = os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml") - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - env = iGibsonEnv(config_file=config_data, mode="gui_interactive") + config_filename = os.path.join(igibson.example_config_path, 'turtlebot_demo.yaml') + env = iGibsonEnv(config_file=config_filename, mode='gui') for j in range(10): - logging.info("Resetting environment") env.reset() for i in range(100): - with Profiler("Environment action step"): + with Profiler('Environment action step'): action = env.action_space.sample() state, reward, done, info = env.step(action) if done: - logging.info("Episode finished after {} timesteps".format(i + 1)) + logging.info( + "Episode finished after {} timesteps".format(i + 1)) break env.close() @@ -244,4 +226,4 @@ if __name__ == "__main__": ``` #### Interactive Environments -In this example, we show how to instantiate `iGibsobEnv` with a fully interactive scene `Rs_int`. In this scene, the robot can interact with all the objects in the scene (chairs, tables, couches, etc). The code can be found here: [igibson/examples/environments/env_int_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/env_int_example.py). +In this example, we show how to instantiate `iGibsobEnv` with a fully interactive scene `Rs_int`. In this scene, the robot can interact with all the objects in the scene (chairs, tables, couches, etc). The code can be found here: [igibson/examples/demo/env_interactive_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/env_interactive_example.py). \ No newline at end of file diff --git a/docs/examples.md b/docs/examples.md deleted file mode 100644 index 5d67aa725..000000000 --- a/docs/examples.md +++ /dev/null @@ -1,21 +0,0 @@ -# Code Examples - -In this section, we include a description of most of the code examples you can find under `igibson/examples/`. -There may be new examples since we wrote this, so check that folder! - -If you are interested in just getting started as an end-user, you only need check out `igibson/examples/environments`. 
- -If you are looking for examples of BEHAVIOR, the benchmark of household activities that uses iGibson, please check the BEHAVIOR repository at https://github.com/StanfordVL/behavior. - -- environments: how to instantiate iGibson environments with interactive or static scenes, optionally with a scene selector. -- learning: how to train RL policies for robot navigation using stable baselines 3, and how to save and replay demos of agents for imitation learning. -- objects: how to create, load, and place objects to predefined locations or using a logic sampler (e.g. onTop(A, B)), how to change texture as a function of the temperature, and how to generate the minimum volume bounding boxes of objects. -- object_states: how to change various objects states, including dusty, stained, (water sources) toggled on, (cleaning tool) soaked, sliced, and temprature, and how to save and reload object states. -- observations: how to generate different observation modalities such as RGB, depth, LiDAR, segmentation, etc. -- renderer: how to use the renderer directly, without the physics engine. -- robots: how to (keyboard) control robots with differential drive controllers, IK controllers and sampling-based motion planners. -- ros: how to run ROS with iGibson as if it is the real world. -- scenes: how to load interactive and non-interactive scenes, how to use domain randomization (of object models and/or texture), and how to create a tour video of the scenes. -- vr: how to use iGibson with VR. -- web_ui: how to start a web server that hosts iGibson environments. - diff --git a/docs/extended_states.md b/docs/extended_states.md deleted file mode 100644 index c6c89cb5a..000000000 --- a/docs/extended_states.md +++ /dev/null @@ -1,82 +0,0 @@ -# Extended States and Logic States - -### Extended States - -iGibson 2.0 is an object-oriented simulator: the simulator maintains and updates a list of objects with properties that change over-time. -Object classes are organized following the WordNet hierarchy. -Different to other simulators, iGibson 2.0 is not limited to kinematic properties of the objects (pose, velocity, acceleration, joint configurations for articulated objects), but it includes also a set of additional object-properties that we call "extended states". -The extended states are: - -#### Temperature - -Real continuous value per object. It can change if the object is close/inside a source or sink of heat, and the source/sink is active. -![extstates1.jpg](images/extstates1.jpg) - -#### Wetness level - -Integer value per object. It changes when in contact with a water dropplet. -![extstates2.jpg](images/extstates2.jpg) - -#### Cleanliness (Dustiness and Stain Level) - -In iGibson 2.0, objects can be initialized with visible dust or stain particles on its surface. The number of particles at initialization corresponds to a 100% level of dustiness, d, or stains, s, as we assume that dust/stain particles cannot be generated after initialization. As particles are cleaned, the level decreases proportionally to the number of particles removed, reaching a level of 0% when all particles have been cleaned. -![extstates3.jpg](images/extstates3.jpg) - -#### Toggled State -Some object categories in iGibson 2.0 can be toggled on and off. iGibson 2.0 maintains and updates an internal binary functional state for those objects. -![extstates4a.jpg](images/extstates4a.jpg) - -#### Sliced State -Many cooking activities require the agent to slice objects, e.g. food items. 
-In iGibson 2.0 some objects can be sliced into two halves and the state of the object will be updated. -![extstates4b.jpg](images/extstates4b.jpg) - - -## Logic States - -Based on the kinematic and extended states, iGibson 2.0 implements a list of logic predicates, checking conditions to indicate if a logic state is True or False. -The logic states implemented and checked by iGibson 2.0 include: -- InsideOf(o1,o2): Object o1 is inside of object o2 if we can find two orthogonal axes crossing at o1 center of mass that intersect o2 collision mesh in both directions. -- OnTopOf(o1,o2): Object o1 is on top of object o2 if o2 is in the list of objects InSameNegativeVerticalAxisObjs(o1) and o2 is in the list of objects InSamePositiveVerticalAxisObjs(o1) and InContactWith(o1, o2), where InSamePositive/NegativeVerticalAxisObjs(o1) is the list of objects in the same positive/negative vertical axis as o1 and InContactWith(o1, o2) is whether the two objects are in physical contact. -- NextTo(o1,o2): Object o1 is next to object o2 if o2 is in the list of objects InSameHorizontalPlaneObjs(o1) and l2(o1, o2) < tNextTo , where InSameHorizontalPlaneObjs(o1) is a list of objects in the same horizontal plane as o1, l2 is the L2 distance between the closest points of the two objects, and tNextTo is a distance threshold that is proportional to the average size of the two objects. -- InContactWith(o1,o2): Object o1 is in contact with o2 if their surfaces are in contact in at least one point, i.e., o2 is in the list of objects InContactObjs(o1). -- Under(o1,o2): Object o1 is under object o2 if o2 is in the list of objects InSamePositiveVerticalAxisObjs(o1) and o2 6 is in the list of objects InSameNegativeVerticalAxisObjs(o1). -- OnFloor(o1,o2): Object o1 is on the room floor o2 if InContactWith(o1, o2) and o2 is of Room type. -- Open(o): Any joints (internal articulated degrees of freedom) of object o are open. Only joints that are relevant to consider an object Open are used in the predicate computation, e.g. the door of a microwave but not the buttons and controls. To select the relevant joints, object models of categories that can be Open undergo an additional annotation that produces a RelevantJoints list. A joint is considered open if its joint state q is 5% over the lower limit, i.e. q > 0.05(qUpperLimit − qLowerLimit ) + qLowerLimit. -- Cooked(o): The temperature of object o was over the cooked threshold, Tcooked , and under the burnt threshold, Tburnt, at least once in the history of the simulation episode, i.e., Tcooked ≤ T max o < Tburnt. We annotate the cooked temperature Tcooked for each object category that can be Cooked. -- Burnt(o): The temperature of object o was over the burnt threshold, Tburnt, at least once in the history of the simulation episode, i.e., T max o ≥ Tburnt. We annotate the burnt temperature Tburnt for each object category that can be Burnt. -- Frozen(o): The temperature of object o is under the freezing threshold, Tfrozen , i.e., To ≤ Tfrozen . We assume as default freezing temperature Tfrozen = 0◦C, a value that can be adapted per object category and model. -- Soaked(o): The wetness level w of the object o is over a threshold, wsoaked , i.e., w ≥ wsoaked . The default value for the threshold is wsoaked = 1, (the object is soaked if it absorbs one or more droplets), a value that can be adapted per object category and model. -- Dusty(o): The dustiness level d of the object o is over a threshold, ddusty , i.e., d > ddusty . 
The default value for the threshold is ddusty = 0.5, (half of the dust particles have been cleaned), a value that can be adapted per object category and model. -- Stained(o): The stain level s of the object o is over a threshold, sstained , i.e., s > sstained . The default value for the threshold is sstained = 0.5, (half of the stain particles have been cleaned), a value that can be adapted per object category and model. -- ToggledOn(o): Object o is toggled on or off. It is a direct query of the iGibson 2.0 objects’ extended state TS, the toggled state. -- Sliced(o): Object o is sliced or not. It is a direct access of the iGibson 2.0 objects’ extended state SS, the sliced state. -- InFoVOfAgent(o): Object o is in the field of view of the agent, i.e., at least one pixel of the image acquired by the agent’s onboard sensors corresponds to the surface of o. -- InHandOfAgent(o): Object o is grasped by the agent’s hands (i.e. assistive grasping is activated on that object). -- InReachOfAgent(o): Object o is within dreach = 2 meters away from the agent. -- InSameRoomAsAgent(o): Object o is located in the same room as the agent. - - -We do not annotate all objects all extended states. -Objects are annotated with extended states that allow to query meaningful and useful logic states from them, e.g. temperature to query if food is cooked (or frozen or burned). -This doesn't mean a table does not have temperature, but it is not relevant for most robotics activities simulated in iGibson 2.0. -We annotate object classes with meaningful logic states that apply to them, and, possibly, the parameters to check these states. -The list of classes and states can be found in the BDDL repository ([list of object characteristics](https://github.com/sanjanasrivastava/BDDL/blob/master/utils/synsets_to_filtered_properties_pruned_igibson.json)). - -The characteristics that can be annotated include (additional extended state and parameters in parenthesis): - (e.g., the temperature is not necessary/relevant for non-food categories for most tasks of interest). 
-- cookable: Can be Cooked (MaxTemperature, Temperature) -- burnable: Can be Burnt (MaxTemperature, Temperature) -- freezable: Can be Frozen (Temperature) -- soakable: Can be Soaked (WetnessLevel) -- dustyable: Can be Dusty (DustinessLevel) -- stainable: Can be Stained (StainLevel) -- toggleable: Can be ToggledOn (ToggledState) -- sliceable: Can be Sliced (SlicedState) - -Additionally, some objects present characteristics to help changing the logic states of others (additional extended state in parenthesis): -- Is a HeatSourceSink (ToggledState) -- Is a DropletSource (ToggledState) -- Is a CleaningTool -- Is a Liquid -- Is a Slicer diff --git a/docs/images/extstates1.jpg b/docs/images/extstates1.jpg deleted file mode 100644 index a73fca670..000000000 Binary files a/docs/images/extstates1.jpg and /dev/null differ diff --git a/docs/images/extstates2.jpg b/docs/images/extstates2.jpg deleted file mode 100644 index 11defc46e..000000000 Binary files a/docs/images/extstates2.jpg and /dev/null differ diff --git a/docs/images/extstates3.jpg b/docs/images/extstates3.jpg deleted file mode 100644 index e3b44afe9..000000000 Binary files a/docs/images/extstates3.jpg and /dev/null differ diff --git a/docs/images/extstates4.jpg b/docs/images/extstates4.jpg deleted file mode 100644 index 9b921b4f9..000000000 Binary files a/docs/images/extstates4.jpg and /dev/null differ diff --git a/docs/images/extstates4a.jpg b/docs/images/extstates4a.jpg deleted file mode 100644 index fcd409eb6..000000000 Binary files a/docs/images/extstates4a.jpg and /dev/null differ diff --git a/docs/images/extstates4b.jpg b/docs/images/extstates4b.jpg deleted file mode 100644 index d98e35312..000000000 Binary files a/docs/images/extstates4b.jpg and /dev/null differ diff --git a/docs/images/object.png b/docs/images/object.png new file mode 100644 index 000000000..e8ed128a8 Binary files /dev/null and b/docs/images/object.png differ diff --git a/docs/images/sampling2.gif b/docs/images/sampling2.gif deleted file mode 100644 index 79a3b2f11..000000000 Binary files a/docs/images/sampling2.gif and /dev/null differ diff --git a/docs/index.rst b/docs/index.rst index f26d580ed..19e43f2c5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -21,20 +21,17 @@ Welcome to iGibson's documentation! dataset.md assets.md - simulators.md - physics_engine.md - extended_states.md renderer.md - viewer.md - environments.md + physics_engine.md scenes.md objects.md robots.md - sampling.md + simulators.md + viewer.md + environments.md learning_framework.md ros_integration.md tests.md - examples.md .. toctree:: :maxdepth: 1 diff --git a/docs/installation.md b/docs/installation.md index b0d963b40..72bd7cb6e 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -1,9 +1,7 @@ # Installation There are two steps to install iGibson, the Interactive Gibson Environment, on your computer. -First, you need to install the simulation environment. This may require installing additional dependencies. Then, you need to download the assets: models of the robotic agents, the interactive objects and 3D reconstructed real-world large environments for your agents to train. - -## Installing the Environment +First, you need to install the simulation environment. Then, you need to download the assets: models of the robotic agents, the interactive objects and 3D reconstructed real-world large environments for your agents to train. 
### System Requirements @@ -31,35 +29,9 @@ The minimum system requirements are the following: Other system configurations may work, but we haven't tested them extensively and we probably won't be able to provide as much support as we want. -### Installing dependencies in Linux machines - -As most Python packages, we recommend to install iGibson in a virtual environment. -We suggest to use Conda instead of a standard virtual environment. -To setup anaconda with the right dependencies, run the following as your user account (**not as root/superuser**): - -```bash -# Install miniconda -curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -bash Miniconda-latest-Linux-x86_64.sh -rm Miniconda-latest-Linux-x86_64.sh - -# Add conda to your PATH -echo "export PATH=$HOME/.miniconda/bin:$PATH" >> .bashrc - -# Update conda and create a virtual environment for iGibson -conda update -y conda -conda create -y -n igibson python=3.8 -conda activate igibson -``` - -
-

Careful, this may change your GPU drivers!

-There are several system dependencies to correctly run iGibson on Linux, mostly related to Nvidia drivers and Cuda.
-In case your system is a clean Ubuntu 20.04, you can run the following commands as root/superuser to install all required dependencies:
+## Installing dependencies
-
- Click to expand the code to install the dependencies including Nvidia drivers in headless mode to use for example in a cluster:
-

+Beginning with a clean ubuntu 20.04 installation, you **must run the following script as root/superuser** (`sudo su`) which will install all needed dependencies to build and run iGibson with CUDA 11.1: ```bash # Add the nvidia ubuntu repositories @@ -70,6 +42,8 @@ apt-get update && apt-get install -y --no-install-recommends \ echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list # The following cuda libraries are required to compile igibson +# Note: the following assumes you will be using nvidia drivers on a headless node +# please use xserver-xorg-video-nvidia-470 if you are on a desktop apt-get update && apt-get update && apt-get install -y --no-install-recommends \ nvidia-headless-470 \ cuda-cudart-11-1=11.1.74-1 \ @@ -84,41 +58,24 @@ apt-get update && apt-get install -y --no-install-recommends \ g++ \ libegl-dev ``` -

-
-
- Click to expand the code to install the dependencies including Nvidia drivers to render on screen for example on a desktop computer:
-

+ +Conda is recommended over standard virtual environments. To setup anaconda with the requisite dependencies, run the following as your user account (**not as root/superuser**): ```bash -# Add the nvidia ubuntu repositories -apt-get update && apt-get install -y --no-install-recommends \ - gnupg2 curl ca-certificates && \ - curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub | apt-key add - && \ - echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \ - echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list +# Install miniconda +curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh +bash Miniconda-latest-Linux-x86_64.sh +rm Miniconda-latest-Linux-x86_64.sh -# The following cuda libraries are required to compile igibson -apt-get update && apt-get update && apt-get install -y --no-install-recommends \ - xserver-xorg-video-nvidia-470 \ - cuda-cudart-11-1=11.1.74-1 \ - cuda-compat-11-1 \ - cuda-command-line-tools-11-1=11.1.1-1 \ - cuda-libraries-dev-11-1=11.1.1-1 \ +# Add conda to your PATH +echo "export PATH=$HOME/.miniconda/bin:$PATH" >> .bashrc -# For building and running igibson -apt-get update && apt-get install -y --no-install-recommends \ - cmake \ - git \ - g++ \ - libegl-dev +# Update conda and create a virtual environment for iGibson +conda update -y conda +conda create -y -n igibson python=3.8 +conda activate igibson ``` -

-
-
-
- By default, iGibson builds with CUDA support which requires that `nvcc` is on your path (or CUDA 11 is symlinked to `/usr/local/cuda` from `/usr/local/cuda-11.1`). Cmake uses `nvcc` to find the CUDA libraries and headers when building iGibson. Add the following to your shell rc (`.bashrc`, `.zshrc`, etc.) and re-login to your shell (`exec bash`, `exec zsh`, etc.): ```bash export PATH=/usr/local/cuda-11.1/bin:$PATH @@ -134,23 +91,23 @@ Cuda compilation tools, release 11.1, V11.1.105 Build cuda_11.1.TC455_06.29190527_0 ``` -In case you want to build without CUDA support (used for the "rendering to GPU tensor" feature), you will have to set `USE_CUDA` to `False` in `iGibson/igibson/render/CMakeLists.txt`. +To build without CUDA support (used for the "rendering to GPU tensor" feature), you will have to set `USE_CUDA` to `False` in `iGibson/igibson/render/CMakeLists.txt`. -### Installing iGibson +## Installing the Environment We provide 3 methods to install the simulator. -#### 1. pip +### 1. pip iGibson's simulator can be installed as a python package using pip: ```bash pip install igibson # This step takes about 4 minutes # run the demo -python -m igibson.examples.environments.env_nonint_example +python -m igibson.examples.demo.demo_static ``` -#### 2. Docker image +### 2. Docker image Docker provides an easy way to reproduce the development environment across platforms without manually installing the software dependencies. We have prepared docker images that contain everything you need to get started with iGibson. @@ -179,7 +136,7 @@ cd iGibson/docker/headless-gui ``` -#### 3. Compile from source +### 3. Compile from source Alternatively, iGibson can be compiled from source: [iGibson GitHub Repo](https://github.com/StanfordVL/iGibson). First, you need to install anaconda following the guide on [their website](https://www.anaconda.com/). @@ -187,10 +144,8 @@ Alternatively, iGibson can be compiled from source: [iGibson GitHub Repo](https: git clone https://github.com/StanfordVL/iGibson --recursive cd iGibson -# if you didn't create the conda environment before: -conda create -y -n igibson python=3.8 -conda activate igibson - +conda create -n py3-igibson python=3.8 anaconda # we support python 2.7, 3.5, 3.6, 3.7, 3.8 +source activate py3-igibson pip install -e . # This step takes about 4 minutes ``` @@ -198,20 +153,19 @@ We recommend the third method if you plan to modify iGibson in your project. If Note: If you are not using conda, you will need the system packages python3-dev (header files to build Python extensions) and python3-opencv (provides opencv and its dependencies). -### The SVL pybullet fork - -To optimize and accelerate physics simulation, we use a custom version of pybullet in iGibson. This is installed automatically if you install iGibson in a fresh conda environment, but if you already have a regular pybullet, you should manually remove it and install our fork as follows (otherwise your 'pip install -e .' will fail): +## The SVL pybullet fork +Note: we support using a custom pybullet version to speed up the physics in iGibson. This is installed automatically if you install iGibson. 
If you already have pybullet installed in your conda environment, you can replace it with our fork as follows: ```bash pip uninstall pybullet pip install pybullet-svl ``` -## Downloading the Assets and Datasets of Scenes and Objects +## Downloading the Assets -Once the environment has been installed, we need to download the assets to enable the simulation including models of the robotic agents, objects, 3D scenes, etc. This process requires three simple steps. - -First, we need to configure where the iGibson's assets are going to be stored. The desired path should be indicated in `your_installation_path/igibson/global_config.yaml`. The default place to store the data is: +First, configure where iGibson's assets (robotic agents, objects, 3D environments, etc.) is going to be stored. It is configured in `your_installation_path/igibson/global_config.yaml` + +To make things easier, the default place to store the data is: ```bash assets_path: your_installation_path/igibson/data/assets g_dataset_path: your_installation_path/igibson/data/g_dataset @@ -220,50 +174,47 @@ threedfront_dataset_path: your_installation_path/igibson/data/threedfront_datase cubicasa_dataset_path: your_installation_path/igibson/data/assetscubicasa_dataset ``` -In case you prefer to store the assets in a different location, you can run the command: +If you are happy with the default path, you don't have to do anything, otherwise you can run this script: ```bash python -m igibson.utils.assets_utils --change_data_path ``` -Second, we need to download the robot models and some small objects from the assets bundle [here](https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz) and unpack it in the assets folder. More easily, this process can be automatically done by executing the command: +Second, you can download our robot models and objects from [here](https://storage.googleapis.com/gibson_scenes/assets_igibson.tar.gz) and unpack it in the assets folder, or simply run this download script: ```bash python -m igibson.utils.assets_utils --download_assets ``` -Third, we need to download datasets of scenes (Gibson or iGibson), and, possibly, the BEHAVIOR Datasets of Object Models. -For interactive tasks, you need to download iGibson 2.0 Scenes and the BEHAVIOR Dataset of Objects, or iGibson 1.0 Scenes. They include several fully interactive scenes and hundreds of 3D objects to use with our simulator. -For navigation tasks, you could use the interactive scenes, but we also provide back-compatibility to the Gibson Dataset, Stanford 2D-3D-Semantics Dataset, and Matterport3D Dataset that include non-interactive scene models. -Follow the detailed instructions [here](dataset.md) to download the aforementioned datasets. -Alternatively, to avoid downloading an entire dataset before you can test iGibson's functionalities, we provide a single [high quality small non-interactive scene (R's)](https://storage.googleapis.com/gibson_scenes/Rs.tar.gz) for demo and testing purposes. -To download this demo data, run: +Third, you need to download some large 3D reconstructed real-world environments (e.g. houses and offices) from [our dataset](dataset.md) for your agents to be trained in. Create a new folder for those environments and set the path in `your_installation_path/igibson/global_config.yaml` (default and recommended: `your_installation_path/igibson/data/g_dataset` and `your_installation_path/igibson/data/ig_dataset`). 
You can get access and download the Gibson dataset (after filling up the following [license agreement](https://forms.gle/36TW9uVpjrE1Mkf9A)) and the iGibson dataset (following the guide [here](http://svl.stanford.edu/igibson/docs/dataset.html#download-instruction) or following the instructions below). In addition, you can download a single [high quality small environment R's](https://storage.googleapis.com/gibson_scenes/Rs.tar.gz) for demo purposes. + +To download the demo data, run: ```bash python -m igibson.utils.assets_utils --download_demo_data ``` -## Examples +The full Gibson and iGibson dataset can be downloaded using the following command, this script automatically downloads, decompresses, and puts the dataset to correct place. You will get `URL` after filling in the agreement form. -We provide multiple examples to get you started! -Check the folder [igibson/examples](https://github.com/StanfordVL/iGibson/tree/master/igibson/examples) and the description [here](examples.md). - -After installing the code and downloading the demo data, you should be able to try out a simple robot navigation demo executing: +Download iGibson dataset +```bash +python -m igibson.utils.assets_utils --download_ig_dataset +``` +Download Gibson dataset ([agreement signing](https://forms.gle/36TW9uVpjrE1Mkf9A) required to get `URL`) ```bash -python -m igibson.examples.environments.env_nonint_example +python -m igibson.utils.assets_utils --download_dataset URL ``` -## Testing +## Testing -To test iGibson installation, you can run +To test igibson is properly installed, you can run ```bash python >> import igibson ``` For a full suite of tests and benchmarks, you can refer to [tests](tests.md) for more details. -(For Mac users) Some tests will fail as they require a Nvidia GPU. ## Uninstalling Uninstalling iGibson is easy: `pip uninstall igibson` diff --git a/docs/intro.md b/docs/intro.md index 9b931978d..85cc9df72 100644 --- a/docs/intro.md +++ b/docs/intro.md @@ -2,24 +2,20 @@ ### Large Scale Interactive Simulation Environments for Robot Learning -iGibson, the Interactive Gibson Environment, is a simulation environment providing fast visual rendering and physics simulation (based on Bullet). -It is packed with a dataset with hundreds of large 3D environments reconstructed from real homes and offices, and interactive objects that can be pushed and actuated. -iGibson allows researchers to train and evaluate robotic agents that use RGB images and/or other visual sensors to solve indoor (interactive) navigation and mobile manipulation tasks such as opening doors, picking and placing objects, or searching for objects. -With the latest extension, iGibson 2.0 supports new types of [object state changes](extended_states.md) (cook, soak, slice, freeze, etc), that can enable new types of simulated activities! -iGibson implements all features required to evaluate AI solutions in the BEHAVIOR benchmark: [sampling logic activity descriptions](sampling.md), [checking logic states](extended_states.md), connecting to the BEHAVIOR dataset of 3D objects and evaluating BEHAVIOR metrics (information about the [dataset](https://stanfordvl.github.io/behavior/objects.html) and the [metrics](https://stanfordvl.github.io/behavior/metrics.html) can be found in the documentation of the BEHAVIOR repository). +iGibson, the Interactive Gibson Environment, is a simulation environment providing fast visual rendering and physics simulation (based on Bullet). 
It is packed with a dataset with hundreds of large 3D environments reconstructed from real homes and offices, and interactive objects that can be pushed and actuated. iGibson allows researchers to train and evaluate robotic agents that use RGB images and/or other visual sensors to solve indoor (interactive) navigation and manipulation tasks such as opening doors, picking and placing objects, or searching in cabinets. ### Citation -If you use iGibson or its assets and models, consider citing the following publications: +If you use iGibson or its assets and models, consider citing the following publication: ``` -@inproceedings{shen2021igibson, +@misc{shen2021igibson, title={iGibson 1.0: a Simulation Environment for Interactive Tasks in Large Realistic Scenes}, - author={Bokui Shen and Fei Xia and Chengshu Li and Roberto Mart\'in-Mart\'in and Linxi Fan and Guanzhi Wang and Claudia Pérez-D'Arpino and Shyamal Buch and Sanjana Srivastava and Lyne P. Tchapmi and Micael E. Tchapmi and Kent Vainio and Josiah Wong and Li Fei-Fei and Silvio Savarese}, - booktitle={2021 IEEE/RSJ International Conference on Intelligent Robots and Systems}, + author={Bokui Shen and Fei Xia and Chengshu Li and Roberto Martín-Martín and Linxi Fan and Guanzhi Wang and Claudia Pérez-D'Arpino and Shyamal Buch and Sanjana Srivastava and Lyne P. Tchapmi and Micael E. Tchapmi and Kent Vainio and Josiah Wong and Li Fei-Fei and Silvio Savarese}, year={2021}, - pages={accepted}, - organization={IEEE} + eprint={2012.02924}, + archivePrefix={arXiv}, + primaryClass={cs.AI} } ``` @@ -35,9 +31,9 @@ If you use iGibson or its assets and models, consider citing the following publi ``` ### Code Release -The GitHub repository of iGibson can be found [here](https://github.com/StanfordVL/iGibson). Bug reports, suggestions for improvement, as well as community developments are encouraged and appreciated. +The GitHub repository of iGibson can be found here: [iGibson GitHub Repo](https://github.com/StanfordVL/iGibson). Bug reports, suggestions for improvement, as well as community developments are encouraged and appreciated. ### Documentation -The documentation for iGibson can be found [here](http://svl.stanford.edu/igibson/docs/). It includes installation guide (including data download instructions), quickstart guide, code examples, and APIs. +The documentation for iGibson can be found here: [iGibson Documentation](http://svl.stanford.edu/igibson/docs/). It includes installation guide (including data download instructions), quickstart guide, code examples, and APIs. -If you want to know more about iGibson, you can also check out [our webpage](http://svl.stanford.edu/igibson), the [iGibson 2.0 arxiv preprint](https://arxiv.org/abs/2108.03272) and the [iGibson 1.0 arxiv preprint](https://arxiv.org/abs/2012.02924). +If you want to know more about iGibson, you can also check out [our webpage](http://svl.stanford.edu/igibson), [iGibson 2.0 arxiv preprint](https://arxiv.org/abs/2108.03272) and [iGibson 1.0 arxiv preprint](https://arxiv.org/abs/2012.02924). diff --git a/docs/objects.md b/docs/objects.md index cb91f4ff7..022aa7814 100644 --- a/docs/objects.md +++ b/docs/objects.md @@ -91,176 +91,55 @@ Instruction can be found here: [External Objects](https://github.com/StanfordVL/ ### Examples -In this example, we import a few objects into iGibson. The code can be found here: [igibson/examples/objects/load_objects.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/objects/load_objects.py). 
+In this example, we import three objects into PyBullet, two of which are articulated objects. The code can be found here: [igibson/examples/demo/object_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/object_example.py). ```python -import logging -import os -from sys import platform - -import numpy as np -import yaml - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.objects.articulated_object import URDFObject from igibson.objects.ycb_object import YCBObject -from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings -from igibson.render.profiler import Profiler -from igibson.robots.turtlebot import Turtlebot -from igibson.scenes.empty_scene import EmptyScene -from igibson.scenes.gibson_indoor_scene import StaticIndoorScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path -from igibson.utils.utils import let_user_pick, parse_config +from igibson.objects.articulated_object import ArticulatedObject +import igibson +import os +import pybullet as p +import pybullet_data +import time def main(): - """ - This demo shows how to load scaled objects from the iG object model dataset and - additional objects from the YCB dataset in predefined locations - Loads a concrete object model of a table, and a random one of the same category, and 10 cracker boxes - The objects can be loaded into an empty scene, an interactive scene (iG) or a static scene (Gibson) - The example also shows how to use the Environment API or directly the Simulator API, loading objects and robots - and executing actions - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - scene_options = ["Empty scene", "Interactive scene (iG)", "Static scene (Gibson)"] - type_of_scene = let_user_pick(scene_options) - 1 - - if type_of_scene == 0: # Empty - config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")) - settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - # scene.load_object_categories(benchmark_names) - s.import_scene(scene) - robot_config = config["robot"] - robot_config.pop("name") - turtlebot = Turtlebot(**robot_config) - s.import_robot(turtlebot) - - elif type_of_scene == 1: # iG - config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml") - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building - config_data["visible_target"] = False - config_data["visible_path"] = False - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - env = iGibsonEnv(config_file=config_data, mode="gui_interactive") - s = env.simulator + p.connect(p.GUI) + p.setGravity(0, 0, -9.8) + p.setTimeStep(1./240.) - elif type_of_scene == 2: # Gibson - config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")) - settings = MeshRendererSettings(enable_shadow=False, msaa=False) - # Reduce texture scale for Mac. 
- if platform == "darwin": - settings.texture_scale = 0.5 - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) + floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml") + p.loadMJCF(floor) - scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=False) - s.import_scene(scene) - robot_config = config["robot"] - robot_config.pop("name") - turtlebot = Turtlebot(**robot_config) - s.import_robot(turtlebot) + cabinet_0007 = os.path.join( + igibson.assets_path, 'models/cabinet2/cabinet_0007.urdf') + cabinet_0004 = os.path.join( + igibson.assets_path, 'models/cabinet/cabinet_0004.urdf') - # Set a better viewing direction - s.viewer.initial_pos = [-2, 1.4, 1.2] - s.viewer.initial_view_direction = [0.6, -0.8, 0.1] - s.viewer.reset_viewer() + obj1 = ArticulatedObject(filename=cabinet_0007) + obj1.load() + obj1.set_position([0, 0, 0.5]) - # Objects to load: two tables, the first one is predefined model, the second, random for the same category - table_objects_to_load = { - "table_1": { - "category": "breakfast_table", - "model": "1b4e6f9dd22a8c628ef9d976af675b86", - "pos": (0.0, -0.2, 1.01), - "orn": (0, 0, 90), - }, - "table_2": { - "category": "breakfast_table", - "pos": (0.5, -2.0, 1.01), - "orn": (0, 0, 45), - }, - } + obj2 = ArticulatedObject(filename=cabinet_0004) + obj2.load() + obj2.set_position([0, 0, 2]) - # Load the specs of the object categories, e.g., common scaling factor - avg_category_spec = get_ig_avg_category_specs() + obj3 = YCBObject('003_cracker_box') + obj3.load() + obj3.set_position_orientation([0, 0, 1.2], [0, 0, 0, 1]) - scene_objects = {} - try: - for obj in table_objects_to_load.values(): - category = obj["category"] - if category in scene_objects: - scene_objects[category] += 1 - else: - scene_objects[category] = 1 + for _ in range(24000): # at least 100 seconds + p.stepSimulation() + time.sleep(1./240.) - # Get the path for all models of this category - category_path = get_ig_category_path(category) + p.disconnect() - # If the specific model is given, we use it. 
If not, we select one randomly - if "model" in obj: - model = obj["model"] - else: - model = np.random.choice(os.listdir(category_path)) - # Create the full path combining the path for all models and the name of the model - model_path = get_ig_model_path(category, model) - filename = os.path.join(model_path, model + ".urdf") - - # Create a unique name for the object instance - obj_name = "{}_{}".format(category, scene_objects[category]) - - # Create and import the object - simulator_obj = URDFObject( - filename, - name=obj_name, - category=category, - model_path=model_path, - avg_obj_dims=avg_category_spec.get(category), - fit_avg_dim_volume=True, - texture_randomization=False, - overwrite_inertial=True, - initial_pos=obj["pos"], - initial_orn=obj["orn"], - ) - s.import_object(simulator_obj) - - for _ in range(10): - obj = YCBObject("003_cracker_box") - s.import_object(obj) - obj.set_position_orientation(np.append(np.random.uniform(low=0, high=2, size=2), [1.8]), [0, 0, 0, 1]) - - if type_of_scene == 1: - for j in range(10): - logging.info("Resetting environment") - env.reset() - for i in range(100): - with Profiler("Environment action step"): - # action = env.action_space.sample() - state, reward, done, info = env.step([0.1, 0.1]) - if done: - logging.info("Episode finished after {} timesteps".format(i + 1)) - break - else: - for i in range(10000): - with Profiler("Simulator step"): - turtlebot.apply_action([0.1, 0.1]) - s.step() - rgb = s.renderer.render_robot_cameras(modes=("rgb")) - - finally: - if type_of_scene == 1: - env.close() - else: - s.disconnect() +if __name__ == '__main__': + main() +``` -if __name__ == "__main__": - main() +You can open the cabinet and the drawer by dragging your mouse over them. You can even put the cereal box into the drawer like this: +![object](images/object.png) -``` \ No newline at end of file diff --git a/docs/overview.md b/docs/overview.md index 182921208..96a5ea4c2 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -1,38 +1,16 @@ -# Overview of Modules +# Overview Next, we will give an overview of iGibson and briefly explain the different modules in our system. ![quickstart.png](images/overview.png) -First of all, we have **Datasets** and **Assets**. -**Datasets** include the 3D reconstructed real-world environments (iGibson Dataset), and 3D models of objects (BEHAVIOR Dataset of Objects). -**Assets** contain models of robots and some additional 3D objects. -The download guide for the assets can be found [here](installation.html#downloading-the-assets). -More info can be found in the sections [Datasets](dataset.md) and [Assets](assets.md). +First of all, we have **Dataset** and **Assets**. **Dataset** contain 3D reconstructed real-world environments. **Assets** contain models of robots and objects. Download guide can be found [here](installation.html#downloading-the-assets). More info can be found here: [Dataset](dataset.md) and [Assets](assets.md). -Next, we have **Renderer** and **PhysicsEngine**. -These are the two pillars that ensure the visual and physics fidelity of iGibson. -We developed our own renderer that supports customizable camera configuration, physics-based rendering (PBR) and various image modalities, and renders at a lightening speed. -We use the open-sourced [PyBullet](http://www.pybullet.org/) as our underlying physics engine. -It can simulate rigid body collision and joint actuation for robots and articulated objects in an accurate and efficient manner. 
-Since we are using MeshRenderer for rendering and PyBullet for physics simulation, we need to keep them synchronized at all time. -Our code have already handled this for you. More info can be found here: [Renderer](renderer.md) and [PhysicsEngine](physics_engine.md). +Next, we have **Renderer** and **PhysicsEngine**. These are the two pillars that ensure the visual and physics fidelity of iGibson. We developed our own MeshRenderer that supports customizable camera configuration, physics-based rendering (PBR) and various image modalities, and renders at a lightening speed. We use the open-sourced [PyBullet](http://www.pybullet.org/) as our underlying physics engine. It can simulate rigid body collision and joint actuation for robots and articulated objects in an accurate and efficient manner. Since we are using MeshRenderer for rendering and PyBullet for physics simulation, we need to keep them synchronized at all time. Our code have already handled this for you. More info can be found here: [Renderer](renderer.md) and [PhysicsEngine](physics_engine.md). -Furthermore, we have **Scene**, **Object**, **Robot**, and **Simulator**. -**Scene** loads 3D scene meshes from `igibson.g_dataset_path, igibson.ig_dataset_path` and loads/holds the list of objects associated with an interactive scene. -**Object** loads interactable objects from `igibson.assets_path`. -**Robot** loads robots from `igibson.assets_path`. -**Simulator** maintains an instance of **Renderer** and **PhysicsEngine** and provides APIs to import **Scene**, **Object** and **Robot** into both of them and keep them synchronized at all time. -More info can be found here: [Scene](./scenes.md), [Object](./objects.md), [Robot](./robots.md), and [Simulator](simulators.md). +Furthermore, we have **Scene**, **Object**, **Robot**, and **Simulator**. **Scene** loads 3D scene meshes from `igibson.g_dataset_path, igibson.ig_dataset_path`. **Object** loads interactable objects from `igibson.assets_path`. **Robot** loads robots from `igibson.assets_path`. **Simulator** maintains an instance of **Renderer** and **PhysicsEngine** and provides APIs to import **Scene**, **Object** and **Robot** into both of them and keep them synchronized at all time. More info can be found here: [Scene](./scenes.md), [Object](./objects.md), [Robot](./robots.md), and [Simulator](simulators.md). -Moreover, we have **Task**, **Sensor** and **Environment**. -**Task** defines the task setup and includes a list of **Reward Function** and **Termination Condition**. -It also provides task-specific reset functions and task-relevant observation definition. -**Sensor** provides a light wrapper around **Render** to retrieve sensory observation. -**Environment** follows the [OpenAI gym](https://github.com/openai/gym) convention and provides an API interface for external applications. -More info can be found here: [Environment](environments.md). +Moreover, we have **Task**, **Sensor** and **Environment**. **Task** defines the task setup and includes a list of **Reward Function** and **Termination Condition**. It also provides task-specific reset functions and task-relevant observation definition. **Sensor** provides a light wrapper around **Render** to retrieve sensory observation. **Environment** follows the [OpenAI gym](https://github.com/openai/gym) convention and provides an API interface for external applications. More info can be found here: [Environment](environments.md). -Finally, any learning framework (e.g. RL, IL) or planning and control framework (e.g. 
ROS) can be used with **Environment** as long as they accommodate the OpenAI gym interface. -We provide tight integration with **ROS** that allows for evaluation and visualization of, say, ROS Navigation Stack, in iGibson. -More info can be found here: [Learning Framework](learning_framework.md) and [ROS](ros_integration.md). +Finally, any learning framework (e.g. RL, IL) or planning and control framework (e.g. ROS) can be used with **Environment** as long as they accommodate OpenAI gym interface. We provide tight integration with **ROS** that allows for evaluation and visualization of, say, ROS Navigation Stack, in iGibson. More info can be found here: [Learning Framework](learning_framework.md) and [ROS](ros_integration.md). We highly recommend you go through each of the Modules below for more details and code examples. diff --git a/docs/physics_engine.md b/docs/physics_engine.md index 7e444bf3a..43b1dc8c9 100644 --- a/docs/physics_engine.md +++ b/docs/physics_engine.md @@ -8,7 +8,7 @@ Typically, we use `p.createMultiBody` and `p.loadURDF` to load scenes, objects a More info can be found in here: [PyBullet documentation](https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA). ### Examples -In this example, we import a scene, a robot and an object into PyBullet and step through a few seconds of simulation. +In this example, we import a scene, a robot and an object into PyBullet and step through a few seconds of simulation. The code can be found here:[igibson/examples/demo/physics_engine_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/physics_engine_example.py). ```python import pybullet as p diff --git a/docs/quickstart.md b/docs/quickstart.md index c4f937ecd..e72c30fbe 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -4,12 +4,9 @@ Assume you finished installation and assets downloading. Let's get our hands dirty and see iGibson in action. ```bash -python -m igibson.examples.environments.env_nonint_example +python -m igibson.examples.demo.env_example ``` - -If the execution fails with segfault 11, you may need to reduce texture scaling in the config file (igibson/configs/turtlebot_static_nav.yaml) to avoid out-of-memory error. - -You should see something like this. If you are on Mac OS X, you will only see the two small windows. +You should see something like this: ![quickstart.png](images/quickstart.png) The main window shows PyBullet visualization. The robot (TurtleBot) is moving around with random actions in a realistic house (called "Rs", the one you just downloaded!). diff --git a/docs/renderer.md b/docs/renderer.md index ec6ce23e7..73b975d24 100644 --- a/docs/renderer.md +++ b/docs/renderer.md @@ -8,53 +8,38 @@ We developed our own MeshRenderer that supports customizable camera configuratio #### Simple Example -In this example, we render an iGibson scene with a few lines of code. The code can be found in [igibson/examples/renderer/mesh_renderer_simple_example.py ](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/renderer/mesh_renderer_simple_example.py). +In this example, we render an iGibson scene with a few lines of code. The code can be found in [igibson/examples/demo/mesh_renderer_simple_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/mesh_renderer_simple_example.py). 
``` -import logging -import os -import sys - import cv2 +import sys +import os import numpy as np - from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer from igibson.utils.assets_utils import get_scene_path def main(): - """ - Minimal example of use of the renderer. Loads Rs (non interactive), renders one set of images (RGB, normals, - 3D points (as depth)), shows them. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - - # If a model is given, we load it, otherwise we load Rs mesh (non interactive) if len(sys.argv) > 1: model_path = sys.argv[1] else: - model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj") + model_path = os.path.join(get_scene_path('Rs'), 'mesh_z_up.obj') renderer = MeshRenderer(width=512, height=512) renderer.load_object(model_path) - renderer.add_instance_group([0]) + renderer.add_instance(0) camera_pose = np.array([0, 0, 1.2]) view_direction = np.array([1, 0, 0]) renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1]) renderer.set_fov(90) - frames = renderer.render(modes=("rgb", "normal", "3d")) - - # Render 3d points as depth map - depth = np.linalg.norm(frames[2][:, :, :3], axis=2) - depth /= depth.max() - frames[2][:, :, :3] = depth[..., None] - + frames = renderer.render( + modes=('rgb', 'normal', '3d')) frames = cv2.cvtColor(np.concatenate(frames, axis=1), cv2.COLOR_RGB2BGR) - cv2.imshow("image", frames) + cv2.imshow('image', frames) cv2.waitKey(0) -if __name__ == "__main__": +if __name__ == '__main__': main() ``` @@ -66,9 +51,9 @@ For `Rs` scene, the rendering results will look like this: In this example, we show an interactive demo of MeshRenderer. ```bash -python -m igibson.examples.renderer.mesh_renderer_example +python -m igibson.examples.demo.mesh_renderer_example ``` -You may translate the camera by pressing "WASD" on your keyboard and rotate the camera by dragging your mouse. Press `Q` to exit the rendering loop. The code can be found in [igibson/examples/renderer/mesh_renderer_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/renderer/mesh_renderer_example.py). +You may translate the camera by pressing "WASD" on your keyboard and rotate the camera by dragging your mouse. Press `Q` to exit the rendering loop. The code can be found in [igibson/examples/demo/mesh_renderer_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/mesh_renderer_example.py). #### PBR (Physics-Based Rendering) Example @@ -77,7 +62,7 @@ You can test the physically based renderer with the PBR demo. You can render any obj files in the folder. ```bash -python -m igibson.examples.renderer.mesh_renderer_example_pbr /objects/sink/sink_1/shape/visual +python -m igibson.examples.demo.mesh_renderer_example_pbr /objects/sink/sink_1/shape/visual ``` ![pbr_renderer.png](images/pbr_render.png) @@ -86,54 +71,12 @@ You will get a nice rendering of the sink, and should see the metal parts have s #### Velodyne VLP-16 Example -In this example, we show a demo of 16-beam Velodyne VLP-16 LiDAR placed on top of a virtual Turtlebot. The code can be found in [igibson/examples/observations/generate_lidar_velodyne.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/observations/generate_lidar_velodyne.py). +In this example, we show a demo of 16-beam Velodyne VLP-16 LiDAR placed on top of a virtual Turtlebot. 
The code can be found in [igibson/examples/demo/lidar_velodyne_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/lidar_velodyne_example.py). The Velodyne VLP-16 LiDAR visualization will look like this: ![lidar_velodyne.png](images/lidar_velodyne.png) #### Render to PyTorch Tensors -In this example, we show that MeshRenderer can directly render into a PyTorch tensor to maximize efficiency. PyTorch installation is required (otherwise, iGibson does not depend on PyTorch). The code can be found in [igibson/examples/renderer/mesh_renderer_gpu_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/renderer/mesh_renderer_gpu_example.py). - - -#### About the 3D Image - -The mode '3d' provides a 4-channeled image where the first three channels correspond to the x, y, and z coordinates of the pixels in the image. Because our code internally uses OpenGL for rendering, the coordinates are defined in the common convention of this framework: for a given image, the x axis points from left to right, the y axis points from bottom to top, and the z axis points in the opposite direction of the viewing direction of the camera. The camera is located at the location of the link frame "eyes" of the robot, but the orientation is different and defined in the following way: the x axis points along the viewing direction of the camera, the y axis points from right to left and the z axis points from bottom to top. The following code can be helpful to transform points between reference frames: - -``` -# Pose of the camera of the simulated robot in world frame -eye_pos, eye_orn = self.robot.links["eyes"].get_position_orientation() -camera_in_wf = quat2rotmat(xyzw2wxyz(eye_orn)) -camera_in_wf[:3,3] = eye_pos - -# Transforming coordinates of points from opengl frame to camera frame -camera_in_openglf = quat2rotmat(euler2quat(np.pi / 2.0, 0, -np.pi / 2.0)) - -# Pose of the simulated robot in world frame -robot_pos, robot_orn = self.robot.get_position_orientation() -robot_in_wf = quat2rotmat(xyzw2wxyz(robot_orn)) -robot_in_wf[:3, 3] = robot_pos - -# Pose of the camera in robot frame -cam_in_robot_frame = np.dot(np.linalg.inv(robot_in_wf), camera_in_wf) - -u = 0 -v = 0 -[td_image] = self.env.simulator.renderer.render(modes=('3d')) -point_in_openglf = td_image[u, v] -point_in_cf = np.dot(camera_in_openglf, point_in_openglf) -point_in_rf = np.dot(cam_in_robot_frame, point_in_cf) -point_in_wf = np.dot(robot_in_wf, point_in_rf) -``` - -#### About the Semantic Segmentation Image - -The mode 'seg' and 'ins_seg' provides a 4-channeled image where the first channel corresponds to the semantic segmentation and instance segmentation, respectively. The values are normalized between 0 and 1, with a normalizing constant of `MAX_CLASS_COUNT = 512` and `MAX_INSTANCE_COUNT = 1024` (defined in `utils/constants.py`). The following code is helpful to unnormalize the segmentation image: - -``` -[seg, ins_seg] = self.env.simulator.renderer.render(modes=('seg', 'ins_seg')) -seg = (seg[:, :, 0:1] * MAX_CLASS_COUNT).astype(np.int32) -ins_seg = (ins_seg[:, :, 0:1] * MAX_INSTANCE_COUNT).astype(np.int32) -``` +In this example, we show that MeshRenderer can directly render into a PyTorch tensor to maximize efficiency. PyTorch installation is required (otherwise, iGibson does not depend on PyTorch). The code can be found in [igibson/examples/demo/mesh_renderer_gpu_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/mesh_renderer_gpu_example.py). 
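For reference, a minimal sketch of the GPU-tensor rendering path described above, mirroring the CPU `MeshRenderer` example earlier on this page. The `MeshRendererG2G` class name and its module path are assumptions here (check the linked example script for the exact import); the remaining calls reuse the documented `MeshRenderer` API:

```python
import os

import numpy as np

from igibson.utils.assets_utils import get_scene_path

# Assumption: the GPU-tensor renderer is exposed as MeshRendererG2G; see
# igibson/examples/demo/mesh_renderer_gpu_example.py for the exact import path.
from igibson.render.mesh_renderer.mesh_renderer_tensor import MeshRendererG2G


def main():
    model_path = os.path.join(get_scene_path('Rs'), 'mesh_z_up.obj')

    renderer = MeshRendererG2G(width=512, height=512, device_idx=0)
    renderer.load_object(model_path)
    renderer.add_instance(0)

    camera_pose = np.array([0, 0, 1.2])
    view_direction = np.array([1, 0, 0])
    renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
    renderer.set_fov(90)

    # Frames come back as CUDA tensors and can be fed to a PyTorch model
    # without copying them back to the CPU.
    frames = renderer.render(modes=('rgb', 'normal', '3d'))
    print([f.shape for f in frames])


if __name__ == '__main__':
    main()
```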
-This transformation is directly performed if the segmentation is accessed through a `VisionSensor` (e.g., as part of the iGibsonEnv) using the method `get_seg` and `get_ins_seg`. diff --git a/docs/robots.md b/docs/robots.md index 7dff1bc1c..2359ab89d 100644 --- a/docs/robots.md +++ b/docs/robots.md @@ -18,11 +18,11 @@ We provide a wide variety of **Robots** that can be imported into the **Simulato Typically, these robot classes take in the URDF file or MuJoCo XML file of an robot (in `igibson.assets_path`) and provide a `load` function that be invoked externally (usually by `import_robot` of `Simulator`). The `load` function imports the robot into PyBullet. -All robot clases inherit `LocomotionRobot`. Some useful functions are worth pointing out: +All robot clases inherit `LocomotorRobot`. Some useful functions are worth pointing out: - `{get/set}_{position/orientation/rpy/linear_velocity/angular_velocity}`: get and set the physical states of the robot base - `apply_robot_action`: set motor control for each of the controllable joints. It currently supports four modes of control: joint torque, velocity, position, and differential drive for two-wheeled robots - `calc_state`: compute robot states that might be useful for external applications -- `reset`: reset the robot joint states to their default value, particularly useful for mobile manipulators. For instance, `Fetch.reset()` will reset the robot to be something like this: +- `robot_specific_reset`: reset the robot joint states to their default value, particularly useful for mobile manipulators. For instance, `Fetch.robot_specific_reset()` will reset the robot to be something like this: ![fetch.png](images/fetch.png) @@ -33,8 +33,8 @@ Here are some details about how we perform motor control for robots: - `robot_action` will be applied by `apply_robot_action`, which internally executes the following: ```python def apply_robot_action(action): - for n, j in enumerate(self.joints.values()): - j.set_vel(self.velocity_coef * j.max_velocity * float(np.clip(action[n], -1, +1))) + for n, j in enumerate(self.ordered_joints): + j.set_motor_velocity(self.velocity_coef * j.max_velocity * float(np.clip(action[n], -1, +1))) ``` Note that `robot_action` is a normalized joint velocity, i.e. `robot_action[n] == 1.0` means executing the maximum joint velocity for the nth joint. The limits of joint position, velocity and torque are extracted from the URDF file of the robot. @@ -56,72 +56,74 @@ The reference frame of each body part is shown below. ### Examples -In this example, we import four different robots into PyBullet. We keep them still for around 10 seconds and then move them with small random actions for another 10 seconds. The code can be found here: [igibson/examples/robots/robot_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/robots/robot_example.py). +In this example, we import four different robots into PyBullet. We keep them still for around 10 seconds and then move them with small random actions for another 10 seconds. The code can be found here: [igibson/examples/demo/robot_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/robot_example.py). 
```python -import logging +from igibson.robots.locobot_robot import Locobot +from igibson.robots.turtlebot_robot import Turtlebot +from igibson.robots.jr2_kinova_robot import JR2_Kinova +from igibson.robots.fetch_robot import Fetch +from igibson.utils.utils import parse_config import os - +import time import numpy as np - +import pybullet as p +import pybullet_data import igibson -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.robots import REGISTERED_ROBOTS -from igibson.scenes.empty_scene import EmptyScene -from igibson.simulator import Simulator -from igibson.utils.utils import parse_config - def main(): - """ - Robot demo - Loads all robots in an empty scene, generate random actions - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - # Create empty scene - settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - s.import_scene(scene) - - # Create one instance of each robot aligned along the y axis - position = [0, 0, 0] - robots = {} - for robot_config_file in os.listdir(os.path.join(igibson.example_config_path, "robots")): - config = parse_config(os.path.join(igibson.example_config_path, "robots", robot_config_file)) - robot_config = config["robot"] - robot_name = robot_config.pop("name") - robot = REGISTERED_ROBOTS[robot_name](**robot_config) - s.import_robot(robot) + p.connect(p.GUI) + p.setGravity(0, 0, -9.8) + p.setTimeStep(1./240.) + + floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml") + p.loadMJCF(floor) + + robots = [] + config = parse_config(os.path.join(igibson.example_config_path, 'fetch_reaching.yaml')) + fetch = Fetch(config) + robots.append(fetch) + + config = parse_config(os.path.join(igibson.example_config_path,'jr_reaching.yaml')) + jr = JR2_Kinova(config) + robots.append(jr) + + config = parse_config(os.path.join(igibson.example_config_path, 'locobot_point_nav.yaml')) + locobot = Locobot(config) + robots.append(locobot) + + config = parse_config(os.path.join(igibson.example_config_path, 'turtlebot_point_nav.yaml')) + turtlebot = Turtlebot(config) + robots.append(turtlebot) + + positions = [ + [0, 0, 0], + [1, 0, 0], + [0, 1, 0], + [1, 1, 0] + ] + + for robot, position in zip(robots, positions): + robot.load() robot.set_position(position) - robot.reset() + robot.robot_specific_reset() robot.keep_still() - robots[robot_name] = (robot, position[1]) - logging.info("Loaded " + robot_name) - logging.info("Moving " + robot_name) - # Set viewer in front - s.viewer.initial_pos = [1.6, 0, 1.3] - s.viewer.initial_view_direction = [-0.7, 0, -0.7] - s.viewer.reset_viewer() - - for _ in range(100): # keep still for 10 seconds - s.step() - - for _ in range(30): + + for _ in range(2400): # keep still for 10 seconds + p.stepSimulation() + time.sleep(1./240.) 
+ + for _ in range(2400): # move with small random actions for 10 seconds + for robot, position in zip(robots, positions): action = np.random.uniform(-1, 1, robot.action_dim) robot.apply_action(action) - for _ in range(10): - s.step() - - robot.keep_still() - s.reload() - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - s.import_scene(scene) + p.stepSimulation() + time.sleep(1./240.0) - s.disconnect() + p.disconnect() -if __name__ == "__main__": +if __name__ == '__main__': main() ``` The four robots will have a fun cocktail party like this: diff --git a/docs/sampling.md b/docs/sampling.md deleted file mode 100644 index 7a4496317..000000000 --- a/docs/sampling.md +++ /dev/null @@ -1,29 +0,0 @@ -# Sampling Scene Instances - -The [logic states](extended_states.md) implemented in iGibson since the version 2.0 provide a mechanism that facilitates the generation of simulated scenes to study and develop robotic solutions. -Users of iGibson can specify the desired configuration of the environment in the logic language [BDDL](https://github.com/StanfordVL/bddl#readme), part of the [BEHAVIOR benchmark](behavior.stanford.edu). -This language has similarities to the Planning Domain Definition Language (PDDL), allowing researchers to define scenes in logic-semantic manner (e.g., objects on top, next, inside of others) instead of the time consuming and tedious work of specifying manually their positions. -Given a scene definition in BDDL, iGibson provides the functionalities to sample compliant instances of the logic description to be used in simulation. -The image below shows an example of different instances sampled from the same logic description (three books on a table). - -![sampling2.gif](images/sampling2.gif) - -The first step to generate a new activity in iGibson is to create its BDDL description. -Please, follow the instructions [here](https://behavior.stanford.edu/activity-annotation) to create your own BDDL description using our online web interface, or modify some of the existing descriptions included as part of BEHAVIOR (see [here](https://github.com/StanfordVL/bddl/tree/master/bddl/activity_definitions)). - -The next step is to download and install [BDDL](https://github.com/StanfordVL/bddl). Place your own BDDL description under `bddl/activity_definitions//problem0.bddl`. - -Then you can select a *vanilla scene* to instantiate the BDDL description on. -We provide 15 scenes as part of the iGibson dataset, furnished and with clutter objects to be specialized for multiple activities. See the available scenes in `ig_dataset/scenes` or [here](http://svl.stanford.edu/igibson/docs/dataset.html). - -With the BDDL description and a list of scene names, iGibson can generate valid scene instances of the description with the following script: -``` -python -m igibson.utils.data_utils.sampling_task.sampling_saver --task --task_id 0 --scenes ... -``` - -The script will sample possible poses of object models of the indicated classes until all conditions are fulfilled. -The result will be stored as `ig_dataset/scenes//urdf/_task__0_0.urdf`, a description of the scene with additional objects that fulfill the initial conditions in the BDDL description. -The user should ensure that the definition is sampleable in the given scene. Otherwise, after a certain number of sampling attempts, the script will fail and return. - -We recommend to use the BEHAVIOR Dataset of 3D objects to get access to hundreds of object models to create new activities. 
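As a usage sketch (not part of the original page), the documented `sampling_saver` entry point can also be driven from Python, for example to sweep several scenes in one go. The activity and scene names below are placeholders; the module path and flags are taken verbatim from the command above:

```python
import subprocess

# Placeholders: substitute your own BDDL activity name and iGibson scene ids.
task = "my_activity"
scenes = ["Rs_int", "Beechwood_0_int"]

# Invokes the same command documented above:
# python -m igibson.utils.data_utils.sampling_task.sampling_saver --task <task> --task_id 0 --scenes <scene> ...
subprocess.run(
    [
        "python", "-m", "igibson.utils.data_utils.sampling_task.sampling_saver",
        "--task", task,
        "--task_id", "0",
        "--scenes", *scenes,
    ],
    check=True,
)
```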
- diff --git a/docs/scenes.md b/docs/scenes.md index 4c8121c97..e90bd4274 100644 --- a/docs/scenes.md +++ b/docs/scenes.md @@ -6,7 +6,7 @@ We provide four types of scenes. - `StaticIndoorScene`: it loads static 3D scenes from `igibson.g_dataset_path`. - `InteractiveIndoorScene`: it loads fully interactive 3D scenes from `igibson.ig_dataset_path`. -Typically, they take in the `scene_id` of a scene and provide a `load` function that be invoked externally (usually by `import_scene` of the `Simulator`). +Typically, they take in the `scene_id` of a scene and provide a `load` function that be invoked externally (usually by `import_scene` and `import_ig_scene` of the `Simulator`). To be more specific, the `load` function of `StaticIndoorScene` - stores the floor information (we have many multistory houses in our dataset) @@ -35,37 +35,30 @@ Instruction can be found here: [External Scenes](https://github.com/StanfordVL/i #### Stadium Scenes -In this example, we import a simple stadium scene that is good for debugging. The code can be found here: [igibson/examples/scenes/stadium_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/stadium_example.py). +In this example, we import a simple stadium scene that is good for debugging. The code can be found here: [igibson/examples/demo/scene_stadium_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_stadium_example.py). ```python -import logging - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.render.profiler import Profiler from igibson.scenes.stadium_scene import StadiumScene -from igibson.simulator import Simulator - +import pybullet as p +import numpy as np +import time def main(): - """ - Loads the Stadium scene - This scene is default in pybullet but is not really useful in iGibson - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - - settings = MeshRendererSettings(enable_shadow=False, msaa=False) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) + p.connect(p.GUI) + p.setGravity(0,0,-9.8) + p.setTimeStep(1./240.) scene = StadiumScene() - s.import_scene(scene) + scene.load() - while True: - with Profiler("Simulator step"): - s.step() - s.disconnect() + for _ in range(24000): # at least 100 seconds + p.stepSimulation() + time.sleep(1./240.) + + p.disconnect() -if __name__ == "__main__": +if __name__ == '__main__': main() ``` @@ -74,156 +67,98 @@ The stadium scene looks like this: #### Static Building Scenes -In this example, we import a static scene, and then randomly sample a pair of locations in the scene and compuete the shortest path between them. The code can be found here: [igibson/examples/scenes/g_scene_selector.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/g_scene_selector.py). +In this example, we import a static scene, and then randomly sample a pair of locations in the scene and compuete the shortest path between them. The code can be found here: [igibson/examples/demo/scene_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_example.py). 
```python -import logging -from sys import platform - +from igibson.scenes.gibson_indoor_scene import StaticIndoorScene +import pybullet as p import numpy as np +import time -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.render.profiler import Profiler -from igibson.scenes.gibson_indoor_scene import StaticIndoorScene -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_available_g_scenes -from igibson.utils.utils import let_user_pick +def main(): + p.connect(p.GUI) + p.setGravity(0,0,-9.8) + p.setTimeStep(1./240.) + scene = BuildingScene('Rs', + build_graph=True, + pybullet_load_texture=True) + scene.load() -def main(): - """ - Prompts the user to select any available non-interactive scene and loads it. - Shows how to load directly scenes without the Environment interface - Shows how to sample points in the scene and how to compute geodesic distance and the shortest path - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - available_g_scenes = get_available_g_scenes() - scene_id = available_g_scenes[let_user_pick(available_g_scenes) - 1] - settings = MeshRendererSettings(enable_shadow=True, msaa=False) - # Reduce texture scale for Mac. - if platform == "darwin": - settings.texture_scale = 0.5 - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - - scene = StaticIndoorScene( - scene_id, - build_graph=True, - ) - s.import_scene(scene) - - # Shows how to sample points in the scene np.random.seed(0) for _ in range(10): random_floor = scene.get_random_floor() - p1 = scene.get_random_point(random_floor)[1] - p2 = scene.get_random_point(random_floor)[1] + p1 = scene.get_random_point(floor=random_floor)[1] + p2 = scene.get_random_point(floor=random_floor)[1] shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True) - logging.info("Random point 1: {}".format(p1)) - logging.info("Random point 2: {}".format(p2)) - logging.info("Geodesic distance between p1 and p2: {}".format(geodesic_distance)) - logging.info("Shortest path from p1 to p2: {}".format(shortest_path)) + print('random point 1:', p1) + print('random point 2:', p2) + print('geodesic distance between p1 and p2', geodesic_distance) + print('shortest path from p1 to p2:', shortest_path) - input("Press enter") + for _ in range(24000): # at least 100 seconds + p.stepSimulation() + time.sleep(1./240.) - while True: - with Profiler("Simulator step"): - s.step() - s.disconnect() + p.disconnect() -if __name__ == "__main__": +if __name__ == '__main__': main() ``` - #### Interactive Building Scenes -In this example, we import a fully interactive scene, and randomly sample points given a room type such as "living_room". This can be useful for tasks that require the robot to always be spawned in certain room types. We support fifteen such scenes right now as part of the new iGibson Dataset. The code can be found here: [igibson/examples/scenes/ig_scene_selector.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/ig_scene_selector.py). +In this example, we import a fully interactive scene, and randomly sample points given a room type such as "living_room". This can be useful for tasks that require the robot to always be spawned in certain room types. We support fifteen such scenes right now as part of the new iGibson Dataset. 
The code can be found here: [igibson/examples/demo/scene_interactive_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_interactive_example.py). Note that all objects in these scenes can be interacted realistically. ![scene_interactive.png](images/scene_interactive.png) ```python -import logging -from sys import platform - -import numpy as np - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.render.profiler import Profiler from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_available_ig_scenes -from igibson.utils.utils import let_user_pick +import numpy as np def main(): - """ - Prompts the user to select any available interactive scene and loads it. - Shows how to load directly scenes without the Environment interface - Shows how to sample points in the scene by room type and how to compute geodesic distance and the shortest path - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - available_ig_scenes = get_available_ig_scenes() - scene_id = available_ig_scenes[let_user_pick(available_ig_scenes) - 1] - settings = MeshRendererSettings(enable_shadow=True, msaa=False) - if platform == "darwin": - settings.texture_scale = 0.5 - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - + s = Simulator(mode='gui', image_width=512, + image_height=512, device_idx=0) scene = InteractiveIndoorScene( - scene_id, - load_object_categories=[], # To load only the building. Fast - build_graph=True, - ) - s.import_scene(scene) + 'Rs_int', texture_randomization=False, object_randomization=False) + s.import_ig_scene(scene) - # Shows how to sample points in the scene np.random.seed(0) for _ in range(10): - pt = scene.get_random_point_by_room_type("living_room")[1] - logging.info("Random point in living_room: {}".format(pt)) + pt = scene.get_random_point_by_room_type('living_room')[1] + print('random point in living_room', pt) - for _ in range(10): - random_floor = scene.get_random_floor() - p1 = scene.get_random_point(random_floor)[1] - p2 = scene.get_random_point(random_floor)[1] - shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True) - logging.info("Random point 1: {}".format(p1)) - logging.info("Random point 2: {}".format(p2)) - logging.info("Geodesic distance between p1 and p2: {}".format(geodesic_distance)) - logging.info("Shortest path from p1 to p2: {}".format(shortest_path)) - - input("Press enter") - - while True: - with Profiler("Simulator step"): - s.step() + for _ in range(1000): + s.step() s.disconnect() -if __name__ == "__main__": +if __name__ == '__main__': main() + ``` ##### Texture Randomization -In this example, we demonstrate material/texture randomization functionality of `InteractiveIndoorScene`. The goal is to randomize the material, texture and dynamic properties of all scene objects by calling `scene.randomize_texture` on-demand. The code can be found here: [igibson/examples/scenes/scene_texture_rand_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/scene_texture_rand_example.py ). +In this example, we demonstrate material/texture randomization functionality of `InteractiveIndoorScene`. The goal is to randomize the material, texture and dynamic properties of all scene objects by calling `scene.randomize_texture` on-demand. 
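A minimal sketch of this workflow is shown below, assuming the same `Simulator`, `InteractiveIndoorScene`, and `import_ig_scene` API used in the example above; the episode structure and step counts are illustrative only, and the full example is linked right after this sketch.
```python
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator


def main():
    s = Simulator(mode='gui', image_width=512, image_height=512)
    # texture_randomization=True loads the alternative material sets so that
    # randomize_texture() can swap between them on demand
    scene = InteractiveIndoorScene(
        'Rs_int', texture_randomization=True, object_randomization=False)
    s.import_ig_scene(scene)

    for episode in range(10):
        # draw a new set of materials/textures for every scene object
        scene.randomize_texture()
        for _ in range(100):
            s.step()
    s.disconnect()


if __name__ == '__main__':
    main()
```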
The code can be found here: [igibson/examples/demo/scene_interactive_texture_rand_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_interactive_texture_rand_example.py). The randomized materials in the `ExternalView` window should look like this. ![scene_interactive_texture_rand](images/scene_interactive_texture_rand.png) ##### Object Randomization -In this example, we demonstrate object randomization functionality of `InteractiveIndoorScene`. The goal is to randomize the object models while maintaining their poses and categories. Note that when object models are randomized, there is no guarantee that they have no collisions or the fixed, articulated objects can extend their joints without collision. We provide `scene.check_scene_quality` functionality to check scene quality and you should do object model re-sampling if this function returns `False`. An alternative way (recommended) is to use randoml object model configuration that we provide (10 for each scenes) which guarantees scene quality, by passing in `object_randomization_idx=[0-9]`. Finally, object randomization can be expensive because the new object models need to be loaded to the simulator each time, so we recommend only using it occasionally (e.g. every 1000 training episodes). The code can be found here: [igibson/examples/scenes/scene_texture_rand_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/scene_texture_rand_example.py). +In this example, we demonstrate object randomization functionality of `InteractiveIndoorScene`. The goal is to randomize the object models while maintaining their poses and categories. Note that when object models are randomized, there is no guarantee that they are collision-free or that fixed, articulated objects can extend their joints without collision. We provide `scene.check_scene_quality` to check scene quality, and you should re-sample object models if this function returns `False`. An alternative (recommended) way is to use the random object model configurations that we provide (10 for each scene), which guarantee scene quality, by passing in `object_randomization_idx=[0-9]`. Finally, object randomization can be expensive because the new object models need to be loaded to the simulator each time, so we recommend only using it occasionally (e.g. every 1000 training episodes). The code can be found here: [igibson/examples/demo/scene_interactive_object_rand_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_interactive_object_rand_example.py). The randomized object models in the `ExternalView` window should look like this. ![scene_interactive_object_rand](images/scene_interactive_object_rand.png) ##### Partial Scene Loading -In this example, we demonstrate partial scene loading functionality of `InteractiveIndoorScene`. Specifically in this example we only load "chairs" in "living rooms". This can be useful for tasks that only require certain object categories or rooms. The code can be found here: [igibson/examples/scenes/scene_partial_loading_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/scenes/scene_partial_loading_example.py). +In this example, we demonstrate partial scene loading functionality of `InteractiveIndoorScene`. Specifically, in this example we only load "chairs" in "living rooms". This can be useful for tasks that only require certain object categories or rooms.
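As a rough sketch of partial loading (before the full example linked below), the scene constructor can be restricted to certain object categories and room types; the keyword names here follow the linked example and are assumptions that may vary slightly across iGibson versions.
```python
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator


def main():
    s = Simulator(mode='gui', image_width=512, image_height=512)
    # only load chairs that belong to living rooms (assumed keyword names)
    scene = InteractiveIndoorScene(
        'Rs_int',
        load_object_categories=['chair'],
        load_room_types=['living_room'],
    )
    s.import_ig_scene(scene)

    for _ in range(1000):
        s.step()
    s.disconnect()


if __name__ == '__main__':
    main()
```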
The code can be found here: [igibson/examples/demo/scene_interactive_partial_loading_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/scene_interactive_partial_loading_example.py). #### Visualize Traversability Map -In this example, we visuliaze the traversability map of a scene. We use this map to build an internal traversability graph for each floor so that we can compute the shortest path between two locations, and place robots and objects at valid locations inside the scene. The code can be found here: [igibson/examples/robots/trav_map_vis_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/robots/trav_map_vis_example.py). +In this example, we visuliaze the traversability map of a scene. We use this map to build an internal traversability graph for each floor so that we can compute the shortest path between two locations, and place robots and objects at valid locations inside the scene. The code can be found here: [igibson/examples/demo/trav_map_vis_example.py](https://github.com/StanfordVL/iGibson/blob/master/examples/trav_map_vis_example.py). The traversability map of the scene `Rs` looks like this: ![trav_map_vis](images/trav_map_vis.png) diff --git a/docs/simulators.md b/docs/simulators.md index c94ae25de..e460e3e58 100644 --- a/docs/simulators.md +++ b/docs/simulators.md @@ -1,4 +1,4 @@ -# Simulator +# Simulators ### Overview @@ -6,7 +6,7 @@ Some key functions are the following: - `load`: initialize PyBullet physics engine and MeshRenderer -- `import_{scene, ig_scene}`: import the scene into PyBullet by calling `scene.load`, and then import it into MeshRenderer by calling `self.renderer.add_instance_group`. If `InteractiveIndoorScene` is imported using `import_scene`, all objects in the scene are also imported. +- `import_{scene, ig_scene}`: import the scene into PyBullet by calling `scene.load`, and then import it into MeshRenderer by calling `self.renderer.add_instance`. If `InteractiveIndoorScene` is imported using `import_ig_scene`, all objects in the scene are also imported. - `import_{object, articulated_object, robot}`: import the object, articulated object and robot into the simulator in a similar manner - `sync`: synchronize the poses of the dynamic objects (including the robots) between PyBullet and MeshRenderer. Specifically, it calls `update_position` for each object, in which it retrieve the object's pose in PyBullet, and then update its pose accordingly in MeshRenderer. @@ -15,10 +15,10 @@ If `Simulator` uses `gui` mode, by default it will also maintain a `Viewer`, whi Most of the code can be found here: [igibson/simulator.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/simulator.py). ### Examples -In this example, we import a `StaticIndoorScene`, a `Turtlebot`, and ten `YCBObject` into the simulator. +In this example, we import a `StaticIndoorScene`, a `Turtlebot`, and ten `YCBObject` into the simulator. 
The code can be found here: [igibson/examples/demo/simulator_example.py](https://github.com/StanfordVL/iGibson/blob/master/igibson/examples/demo/simulator_example.py) ```python -from igibson.robots.turtlebot import Turtlebot +from igibson.robots.turtlebot_robot import Turtlebot from igibson.simulator import Simulator from igibson.scenes.gibson_indoor_scene import StaticIndoorScene from igibson.objects.ycb_object import YCBObject @@ -30,9 +30,9 @@ from IPython import embed def main(): - config = parse_config('../configs/turtlebot_static_nav.yaml') + config = parse_config('../configs/turtlebot_demo.yaml') settings = MeshRendererSettings(enable_shadow=False, msaa=False) - s = Simulator(mode='gui_interactive', image_width=256, + s = Simulator(mode='gui', image_width=256, image_height=256, rendering_settings=settings) scene = StaticIndoorScene('Rs', diff --git a/docs/tests.md b/docs/tests.md index be3f66706..bc43b6913 100644 --- a/docs/tests.md +++ b/docs/tests.md @@ -1,8 +1,8 @@ -# Tests and Examples -### Tests -We provide tests in [this directory](https://github.com/StanfordVL/iGibson/tree/master/tests) in our repository. -You can run them executing: +# Tests +### Overview +We provide tests in [test](https://github.com/StanfordVL/iGibson/tree/master/igibson/test). You can run them by this: ```bash +cd igibson/test pytest --ignore disabled --ignore benchmark ``` It will take a few minutes. If all tests pass, you will see something like this @@ -21,3 +21,4 @@ test_scene_importing.py .... [ 9 test_simulator.py . [ 96% ] test_viewer.py ``` + diff --git a/docs/viewer.md b/docs/viewer.md index 0335485ac..a312211c4 100644 --- a/docs/viewer.md +++ b/docs/viewer.md @@ -2,24 +2,19 @@ ### Overview -We developed an easy-to-use iGibson-human interface called **Viewer** for users to inspect and interact with our scenes and objects. The Viewer will automatically pop up if you use `gui_non_interactive` or `gui_interactive` mode in `Simulator`. +We developed an easy-to-use iGibson-human interface called **Viewer** for users to inspect and interact with our scenes and objects. The Viewer will automatically pop up if you use `gui` or `iggui` mode in `Simulator`. To play with an example of viewer, you can run the following command: ```bash -python -m igibson.examples.environments.env_int_example +python -m igibson.examples.demo.mouse_interaction ``` -or -```bash -python -m igibson.examples.environments.env_nonint_example -``` -after downloading the Rs_int or Rs scenes (interactive or non-interactive, see [the installation instructions](installation.md)) ![viewer.png](images/viewer.png) -In example image above, you can see on the top left corner `px 0.4 py -0.9 pz 1.2`, which indicates the camera position, `[1.0, 0.1, -0.1]`, which indicates the camera viewing direction, and `manip mode`, which indicates the current control mode you are in. There are three modes that we explain below. +On the top left corner, you can see `px 0.4 py -0.9 pz 1.2`, which indicates the camera position, `[1.0, 0.1, -0.1]`, which indicates the camera orientation, and `manip mode`, which indicates the current control mode you are in (explained below). 
Keyboard control includes the following -- `W`, `A`, `S`, `D`, `T`, `G`: translate forward/left/backward/right/up/down +- `W`, `A`, `S`, `D`: translate forward/left/backward/right - `Q`, `E`: rotate left/right - `M`: choose between different control mode (navigation, manipulation and planning) - `R`: start/stop recording @@ -45,4 +40,4 @@ Mouse control in planning mode In manipulation and planning modes, a visual indicator will be visualized in the `Viewer` to assist control (e.g. the blue sphere at the bottom in the image above). -The code can be found in [igibson/render/viewer.py](https://github.com/StanfordVL/iGibson/tree/master/igibson/render/viewer.py). +Most of the code can be found in [igibson/render/viewer.py](https://github.com/StanfordVL/iGibson/tree/master/igibson/render/viewer.py). diff --git a/docs/virtual_reality.md b/docs/virtual_reality.md index 29b6259c2..fb4e9a4a8 100644 --- a/docs/virtual_reality.md +++ b/docs/virtual_reality.md @@ -1,39 +1,38 @@ ### Virtual Reality Overview -Virtual reality is currently supported on Windows 10/11 on the HTC Vive and Oculus Rift/Quest (with link), and on Linux with the HTC Vive (there is no linux driver for Oculus). +Virtual reality is currently only supported on Windows 10/11 on the HTC Vive and Oculus Rift/Quest (with link). -The HTC Vive Pro Eye tracking driver is not available for Linux. You must have the latest Nvidia driver 470.XX and SteamVR 1.19.7 as asynchronous reprojection (a form of motion smoothing necessary for a usable VR experience) was only added in mid 2021. +Linux support is planned, but the HTC Vive Pro Eye tracking driver is not available for Linux, and currently asynchronous reprojection/motion smoothing is not supported on the Nvidia proprietary drivers on Linux. ### Setup 1. Set up the HTC Vive VR hardware according to the [setup guide](https://support.steampowered.com/steamvr/HTC_Vive/) 2. (optional) if you plan to use eye tracking on Windows, create a [vive developer account](https://hub.vive.com/sso/login) then download and install the [SRAnipal runtime](https://developer.vive.com/resources/vive-sense/sdk/vive-eye-and-facial-tracking-sdk/). Note you should [calibrate](https://developer.vive.com/us/support/sdk/category_howto/how-to-calibrate-eye-tracking.html) the Vive eye tracker before each recording session. -3. Ensure you have installed iGibson according to the installation [instructions](http://svl.stanford.edu/igibson/docs/installation.html#installation-method). - * Note: On Windows VR support is enabled by default. On Linux, you must install with an additional environmental variable `USE_VR=TRUE pip install -e .`. You must also have addition development headers installed: on Ubuntu `sudo apt install xorg-dev` and on Centos/Fedora: `sudo dnf install libXinerama-devel libXi-devel libXrandr-devel libXcursor-devel`. +3.
Ensure you have installed iGibson according to the installation [instructions](http://svl.stanford.edu/igibson/docs/installation.html#installation-method). ### VR examples We have several examples showing how to use our VR interface: -* vr demo files: `igibson/examples/vr` +* vr demo files: `igibson/examples/demo/vr_demos` -* multi-user VR (experimental): `igibson/examples/vr/muvr` +* multi-user VR (experimental): `igibson/examples/demo/vr_demos/muvr` -* benchmark: `igibson/examples/vr/in_development` +* benchmark: `igibson/examples/demo/vr_demos/in_development` - `vr_hand_dex_benchmark.py` -- Demonstrates various challenging manipulation tasks - `vr_hand_speed_benchmark.py` -- Demonstrates various tasks to assess user speed at working in VR -* data_save_replay: `igibson/examples/vr/data_save_replay` - - This folder demonstrates how to save VR recordings and "replay" them to reproduce the recorded trajectory +* data_save_replay: `igibson/examples/demo/vr_demos/data_save_replay` + - This folder demonstrates how to save VR recordings and "replay" them to reproduce the recorded trajectory -* robot_embodiment: `igibson/examples/vr/robot_embodiment` +* robot_embodiment: `igibson/examples/demo/vr_demos/robot_embodiment` - This folder demonstrates the VR interface to the Fetch robot (WIP) Benchmarks: -We have two benchmarks - a hand and speed benchmark, both of which can be found in the top level of the vr examples folder. In these demos, you can time yourself performing various challenges, +We have two benchmarks - a hand and speed benchmark, both of which can be found in the top level of the vr_demos folder. In these demos, you can time yourself performing various challenges, such as putting objects away into specific containers/cabinets. Please see the comments in these demo files for more information. ### VR config and button mapping: @@ -54,7 +53,7 @@ Some additional options you may be interested in changing: We recommend looking at `igibson/render/mesh_renderer/mesh_renderer_vr.py` to see the VrSettings class which reads from `vr_config.yaml`. A single VrSettings object is created and passed in to the `Simulator` constructor. -Note(optional): If you are using a device not already mapped, please run `igibson/examples/vr/in_development/vr_button_mapping.py` to figure out which physical controller buttons correspond to which indices in OpenVR. +Note (optional): If you are using a device not already mapped, please run `igibson/examples/demo/vr_demos/in_development/vr_button_mapping.py` to figure out which physical controller buttons correspond to which indices in OpenVR.
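The note above says that a single `VrSettings` object is created (reading `vr_config.yaml`) and passed to the `Simulator` constructor. A hypothetical minimal start-up sketch follows; the exact constructor arguments of `VrSettings` and `Simulator` are assumptions here and may differ between iGibson versions, so check `mesh_renderer_vr.py` and the `vr_demos` folder before relying on it.
```python
from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator


def main():
    # VrSettings reads its defaults from vr_config.yaml; use_vr is an assumed flag
    vr_settings = VrSettings(use_vr=True)
    s = Simulator(mode='vr', vr_settings=vr_settings)
    scene = InteractiveIndoorScene('Rs_int')
    s.import_ig_scene(scene)
    while True:
        s.step()


if __name__ == '__main__':
    main()
```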
### Mirroring the VR view on the monitor diff --git a/igibson/__init__.py b/igibson/__init__.py index a3348ce75..bde107b0b 100644 --- a/igibson/__init__.py +++ b/igibson/__init__.py @@ -3,7 +3,7 @@ import yaml -__version__ = "2.0.4" +__version__ = "2.0.1" logging.getLogger().setLevel(logging.INFO) @@ -62,18 +62,19 @@ if not os.path.isabs(key_path): key_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), key_path) -logging.info("Importing iGibson (igibson module)") -logging.info("Assets path: {}".format(assets_path)) -logging.info("Gibson Dataset path: {}".format(g_dataset_path)) -logging.info("iG Dataset path: {}".format(ig_dataset_path)) -logging.info("3D-FRONT Dataset path: {}".format(threedfront_dataset_path)) -logging.info("CubiCasa5K Dataset path: {}".format(cubicasa_dataset_path)) -logging.info("iGibson Key path: {}".format(key_path)) +# NOTE: njk: turned off below logging prints to avoid cluttering terminal when running predicators code. +# logging.info("Importing iGibson (igibson module)") +# logging.info("Assets path: {}".format(assets_path)) +# logging.info("Gibson Dataset path: {}".format(g_dataset_path)) +# logging.info("iG Dataset path: {}".format(ig_dataset_path)) +# logging.info("3D-FRONT Dataset path: {}".format(threedfront_dataset_path)) +# logging.info("CubiCasa5K Dataset path: {}".format(cubicasa_dataset_path)) +# logging.info("iGibson Key path: {}".format(key_path)) example_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples") -example_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs") +example_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples", "configs") -logging.info("Example path: {}".format(example_path)) -logging.info("Example config path: {}".format(example_config_path)) +# logging.info("Example path: {}".format(example_path)) +# logging.info("Example config path: {}".format(example_config_path)) debug_sampling = False diff --git a/igibson/examples/environments/__init__.py b/igibson/activity/__init__.py similarity index 100% rename from igibson/examples/environments/__init__.py rename to igibson/activity/__init__.py diff --git a/igibson/activity/activity_base.py b/igibson/activity/activity_base.py new file mode 100644 index 000000000..a39e7d008 --- /dev/null +++ b/igibson/activity/activity_base.py @@ -0,0 +1,974 @@ +import logging +from collections import OrderedDict + +import networkx as nx +import pybullet as p +from bddl.activity_base import BEHAVIORActivityInstance +from bddl.condition_evaluation import Negation +from bddl.logic_base import AtomicFormula + +import igibson +from igibson.external.pybullet_tools.utils import * +from igibson.object_states.on_floor import RoomFloor +from igibson.objects.articulated_object import URDFObject +from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer +from igibson.robots.behavior_robot import BehaviorRobot +from igibson.robots.fetch_gripper_robot import FetchGripper +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator +from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path +from igibson.utils.checkpoint_utils import load_internal_states, save_internal_states +from igibson.utils.constants import ( + AGENT_POSE_DIM, + FLOOR_SYNSET, + MAX_TASK_RELEVANT_OBJS, + NON_SAMPLEABLE_OBJECTS, + TASK_RELEVANT_OBJS_OBS_DIM, +) + +KINEMATICS_STATES = frozenset({"inside", "ontop", "under", "onfloor"}) 
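+# KINEMATICS_STATES collects the BDDL predicates that constrain object poses.
+# In the sampling code below, conditions whose STATE_NAME is not in this set
+# are skipped during kinematic-only passes and are only evaluated after the
+# object scope has been finalized.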
+ + +class iGBEHAVIORActivityInstance(BEHAVIORActivityInstance): + def __init__( + self, + behavior_activity, + activity_definition=0, + predefined_problem=None, + robot_type=BehaviorRobot, + robot_config={}, + ): + """ + Initialize simulator with appropriate scene and sampled objects. + :param behavior_activity: string, official ATUS activity label + :param activity_definition: int, specific instance of behavior_activity init/final conditions + optional, randomly generated if not specified + :param predefined_problem: string, in format of a BEHAVIOR problem file read + """ + super().__init__( + behavior_activity, + activity_definition=activity_definition, + scene_path=os.path.join(igibson.ig_dataset_path, "scenes"), + predefined_problem=predefined_problem, + ) + self.state_history = {} + self.robot_type = robot_type + self.robot_config = robot_config + + def initialize_simulator( + self, + simulator=None, + mode="headless", + scene_id=None, + scene_kwargs=None, + load_clutter=False, + should_debug_sampling=False, + online_sampling=True, + ): + """ + Get scene populated with objects such that scene satisfies initial conditions + :param simulator: Simulator class, populated simulator that should completely + replace this function. Use if you would like to bypass internal + Simulator instantiation and population based on initial conditions + and use your own. Warning that if you use this option, we cannot + guarantee that the final conditions will be reachable. + """ + # Set self.scene_name, self.scene, self.sampled_simulator_objects, and self.sampled_dsl_objects + if simulator is None: + self.simulator = Simulator(mode=mode, image_width=960, image_height=720, device_idx=0) + else: + self.simulator = simulator + self.load_clutter = load_clutter + self.should_debug_sampling = should_debug_sampling + if online_sampling: + scene_kwargs["merge_fixed_links"] = False + result = self.initialize( + InteractiveIndoorScene, + scene_id=scene_id, + scene_kwargs=scene_kwargs, + online_sampling=online_sampling, + ) + self.initial_state = self.save_scene() + self.task_obs_dim = MAX_TASK_RELEVANT_OBJS * TASK_RELEVANT_OBJS_OBS_DIM + AGENT_POSE_DIM + return result + + def save_scene(self): + snapshot_id = p.saveState() + self.state_history[snapshot_id] = save_internal_states(self.simulator) + return snapshot_id + + def reset_scene(self, snapshot_id): + p.restoreState(snapshot_id) + load_internal_states(self.simulator, self.state_history[snapshot_id]) + + def check_scene(self): + feedback = {"init_success": "yes", "goal_success": "untested", "init_feedback": "", "goal_feedback": ""} + self.newly_added_objects = set() + room_type_to_obj_inst = {} + self.non_sampleable_object_inst = set() + for cond in self.parsed_initial_conditions: + if cond[0] == "inroom": + obj_inst, room_type = cond[1], cond[2] + obj_cat = self.obj_inst_to_obj_cat[obj_inst] + if obj_cat not in NON_SAMPLEABLE_OBJECTS: + error_msg = "You have assigned room type for [{}], but [{}] is sampleable. 
Only non-sampleable objects can have room assignment.".format( + obj_cat, obj_cat + ) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["init_feedback"] = error_msg + return False, feedback + # Room type missing in the scene + if room_type not in self.scene.room_sem_name_to_ins_name: + error_msg = "Room type [{}] missing in scene [{}].".format(room_type, self.scene.scene_id) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["init_feedback"] = error_msg + return False, feedback + + if room_type not in room_type_to_obj_inst: + room_type_to_obj_inst[room_type] = [] + + room_type_to_obj_inst[room_type].append(obj_inst) + if obj_inst in self.non_sampleable_object_inst: + error_msg = "Object [{}] has more than one room assignment".format(obj_inst) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["init_feedback"] = error_msg + return False, feedback + self.non_sampleable_object_inst.add(obj_inst) + + self.sampling_orders = [] + cur_batch = self.non_sampleable_object_inst + while len(cur_batch) > 0: + self.sampling_orders.append(cur_batch) + next_batch = set() + for cond in self.parsed_initial_conditions: + if len(cond) == 3 and cond[2] in cur_batch: + next_batch.add(cond[1]) + cur_batch = next_batch + + if len(self.sampling_orders) > 0: + remaining_objs = self.object_scope.keys() - set.union(*self.sampling_orders) + else: + remaining_objs = self.object_scope.keys() + + if len(remaining_objs) != 0: + error_msg = "Some objects do not have any kinematic condition defined for them in the initial conditions: {}".format( + ", ".join(remaining_objs) + ) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["init_feedback"] = error_msg + return False, feedback + + for obj_cat in self.objects: + if obj_cat not in NON_SAMPLEABLE_OBJECTS: + continue + for obj_inst in self.objects[obj_cat]: + if obj_inst not in self.non_sampleable_object_inst: + error_msg = ( + "All non-sampleable objects should have room assignment. 
[{}] does not have one.".format( + obj_inst + ) + ) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["init_feedback"] = error_msg + return False, feedback + + room_type_to_scene_objs = {} + for room_type in room_type_to_obj_inst: + room_type_to_scene_objs[room_type] = {} + for obj_inst in room_type_to_obj_inst[room_type]: + room_type_to_scene_objs[room_type][obj_inst] = {} + obj_cat = self.obj_inst_to_obj_cat[obj_inst] + # We allow burners to be used as if they are stoves + categories = self.object_taxonomy.get_subtree_igibson_categories(obj_cat) + if obj_cat == "stove.n.01": + categories += self.object_taxonomy.get_subtree_igibson_categories("burner.n.01") + for room_inst in self.scene.room_sem_name_to_ins_name[room_type]: + if obj_cat == FLOOR_SYNSET: + # TODO: remove after split floors + # Create a RoomFloor for each room instance + # This object is NOT imported by the simulator + room_floor = RoomFloor( + category="room_floor", + name="room_floor_{}".format(room_inst), + scene=self.scene, + room_instance=room_inst, + floor_obj=self.scene.objects_by_name["floors"], + ) + scene_objs = [room_floor] + else: + room_objs = [] + if room_inst in self.scene.objects_by_room: + room_objs = self.scene.objects_by_room[room_inst] + scene_objs = [obj for obj in room_objs if obj.category in categories] + if len(scene_objs) != 0: + room_type_to_scene_objs[room_type][obj_inst][room_inst] = scene_objs + + # Store options for non-sampleable objects in self.non_sampleable_object_scope + # { + # "table1": { + # "living_room_0": [URDFObject, URDFObject, URDFObject], + # "living_room_1": [URDFObject] + # }, + # "table2": { + # "living_room_0": [URDFObject, URDFObject], + # "living_room_1": [URDFObject, URDFObject] + # }, + # "chair1": { + # "living_room_0": [URDFObject], + # "living_room_1": [URDFObject] + # }, + # } + for room_type in room_type_to_scene_objs: + # For each room_type, filter in room_inst that has non-empty + # options for all obj_inst in this room_type + room_inst_satisfied = set.intersection( + *[ + set(room_type_to_scene_objs[room_type][obj_inst].keys()) + for obj_inst in room_type_to_scene_objs[room_type] + ] + ) + if len(room_inst_satisfied) == 0: + error_msg = "Room type [{}] of scene [{}] does not contain all the objects needed.\nThe following are the possible room instances for each object, the intersection of which is an empty set.\n".format( + room_type, self.scene.scene_id + ) + for obj_inst in room_type_to_scene_objs[room_type]: + error_msg += ( + "{}: ".format(obj_inst) + ", ".join(room_type_to_scene_objs[room_type][obj_inst].keys()) + "\n" + ) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["init_feedback"] = error_msg + return False, feedback + + for obj_inst in room_type_to_scene_objs[room_type]: + room_type_to_scene_objs[room_type][obj_inst] = { + key: val + for key, val in room_type_to_scene_objs[room_type][obj_inst].items() + if key in room_inst_satisfied + } + + self.non_sampleable_object_scope = room_type_to_scene_objs + + num_new_obj = 0 + # Only populate self.object_scope for sampleable objects + avg_category_spec = get_ig_avg_category_specs() + for obj_cat in self.objects: + if obj_cat == "agent.n.01": + continue + if obj_cat in NON_SAMPLEABLE_OBJECTS: + continue + is_sliceable = self.object_taxonomy.has_ability(obj_cat, "sliceable") + categories = self.object_taxonomy.get_subtree_igibson_categories(obj_cat) + + # TODO: temporary hack + remove_categories = [ + "pop_case", # too large + "jewel", # too small + "ring", # 
too small + ] + for remove_category in remove_categories: + if remove_category in categories: + categories.remove(remove_category) + + if is_sliceable: + categories = [cat for cat in categories if "half_" not in cat] + + for obj_inst in self.objects[obj_cat]: + category = np.random.choice(categories) + category_path = get_ig_category_path(category) + model_choices = os.listdir(category_path) + + # Filter object models if the object category is openable + synset = self.object_taxonomy.get_class_name_from_igibson_category(category) + if self.object_taxonomy.has_ability(synset, "openable"): + model_choices = [m for m in model_choices if "articulated_" in m] + if len(model_choices) == 0: + error_msg = "{} is Openable, but does not have articulated models.".format(category) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["init_feedback"] = error_msg + return False, feedback + + model = np.random.choice(model_choices) + + # for "collecting aluminum cans", we need pop cans (not bottles) + if category == "pop" and self.behavior_activity in ["collecting_aluminum_cans"]: + model = np.random.choice([str(i) for i in range(40, 46)]) + if category == "spoon" and self.behavior_activity in ["polishing_silver"]: + model = np.random.choice([str(i) for i in [2, 5, 6]]) + + model_path = get_ig_model_path(category, model) + filename = os.path.join(model_path, model + ".urdf") + obj_name = "{}_{}".format(category, len(self.scene.objects_by_name)) + simulator_obj = URDFObject( + filename, + name=obj_name, + category=category, + model_path=model_path, + avg_obj_dims=avg_category_spec.get(category), + fit_avg_dim_volume=True, + texture_randomization=False, + overwrite_inertial=True, + initial_pos=[100 + num_new_obj, 100, -100], + ) + num_new_obj += 1 + + if is_sliceable: + whole_object = simulator_obj + object_parts = [] + assert "object_parts" in simulator_obj.metadata, "object_parts not found in metadata: [{}]".format( + model_path + ) + + for i, part in enumerate(simulator_obj.metadata["object_parts"]): + category = part["category"] + model = part["model"] + # Scale the offset accordingly + part_pos = part["pos"] * whole_object.scale + part_orn = part["orn"] + model_path = get_ig_model_path(category, model) + filename = os.path.join(model_path, model + ".urdf") + obj_name = whole_object.name + "_part_{}".format(i) + simulator_obj_part = URDFObject( + filename, + name=obj_name, + category=category, + model_path=model_path, + avg_obj_dims=avg_category_spec.get(category), + fit_avg_dim_volume=False, + scale=whole_object.scale, + texture_randomization=False, + overwrite_inertial=True, + initial_pos=[100 + num_new_obj, 100, -100], + ) + num_new_obj += 1 + object_parts.append((simulator_obj_part, (part_pos, part_orn))) + + assert len(object_parts) > 0 + grouped_obj_parts = ObjectGrouper(object_parts) + simulator_obj = ObjectMultiplexer( + whole_object.name + "_multiplexer", [whole_object, grouped_obj_parts], 0 + ) + + if not self.scene.loaded: + self.scene.add_object(simulator_obj) + else: + self.simulator.import_object(simulator_obj) + self.newly_added_objects.add(simulator_obj) + self.object_scope[obj_inst] = simulator_obj + + return True, feedback + + def import_agent(self, use_ghost_hands = False): + cached_initial_pose = not self.online_sampling and self.scene.agent != {} + if self.robot_type == BehaviorRobot: + agent = BehaviorRobot(self.simulator) + self.simulator.import_behavior_robot(agent) + agent.set_position_orientation([300, 300, 300], [0, 0, 0, 1]) + 
self.object_scope["agent.n.01_1"] = agent.parts["body"] + if cached_initial_pose: + agent.parts["body"].set_base_link_position_orientation( + self.scene.agent["BRBody_1"]["xyz"], quat_from_euler(self.scene.agent["BRBody_1"]["rpy"]) + ) + agent.parts["left_hand"].set_base_link_position_orientation( + self.scene.agent["left_hand_1"]["xyz"], quat_from_euler(self.scene.agent["left_hand_1"]["rpy"]) + ) + agent.parts["right_hand"].set_base_link_position_orientation( + self.scene.agent["right_hand_1"]["xyz"], quat_from_euler(self.scene.agent["right_hand_1"]["rpy"]) + ) + if use_ghost_hands: + agent.parts["left_hand"].ghost_hand.set_base_link_position_orientation( + self.scene.agent["left_hand_1"]["xyz"], quat_from_euler(self.scene.agent["left_hand_1"]["rpy"]) + ) + agent.parts["right_hand"].ghost_hand.set_base_link_position_orientation( + self.scene.agent["right_hand_1"]["xyz"], quat_from_euler(self.scene.agent["right_hand_1"]["rpy"]) + ) + agent.parts["eye"].set_base_link_position_orientation( + self.scene.agent["BREye_1"]["xyz"], quat_from_euler(self.scene.agent["BREye_1"]["rpy"]) + ) + elif self.robot_type == FetchGripper: + agent = FetchGripper(self.simulator, self.robot_config) + self.simulator.import_robot(agent) + agent.set_position_orientation([300, 300, 300], [0, 0, 0, 1]) + self.object_scope["agent.n.01_1"] = agent + if cached_initial_pose: + # Use the cached pose of BehaviorRobot to initialize FetchGripper + pos = np.copy(self.scene.agent["BRBody_1"]["xyz"]) + pos[2] = 0.0 + pos[2] = stable_z_on_aabb(agent.get_body_id(), [pos, pos]) + agent.set_position_orientation(pos, quat_from_euler(self.scene.agent["BRBody_1"]["rpy"])) + else: + Exception("Only BehaviorRobot and FetchGripper are supported") + + agent.robot_specific_reset() + self.simulator.register_main_vr_robot(agent) + assert len(self.simulator.robots) == 1, "Error, multiple agents is not currently supported" + + def move_agent(self): + """ + Backwards compatibility, to be deprecated + """ + pass + + def import_scene(self): + self.simulator.reload() + self.simulator.import_ig_scene(self.scene) + + # if not self.online_sampling: + for obj_inst in self.object_scope: + matched_sim_obj = None + + # TODO: remove after split floors + if "floor.n.01" in obj_inst: + for _, sim_obj in self.scene.objects_by_name.items(): + if sim_obj.bddl_object_scope is not None and obj_inst in sim_obj.bddl_object_scope: + bddl_object_scope = sim_obj.bddl_object_scope.split(",") + bddl_object_scope = {item.split(":")[0]: item.split(":")[1] for item in bddl_object_scope} + assert obj_inst in bddl_object_scope + room_inst = bddl_object_scope[obj_inst].replace("room_floor_", "") + matched_sim_obj = RoomFloor( + category="room_floor", + name=bddl_object_scope[obj_inst], + scene=self.scene, + room_instance=room_inst, + floor_obj=self.scene.objects_by_name["floors"], + ) + elif obj_inst == "agent.n.01_1": + # Skip adding agent to object scope, handled later by import_agent() + continue + else: + for _, sim_obj in self.scene.objects_by_name.items(): + if sim_obj.bddl_object_scope == obj_inst: + matched_sim_obj = sim_obj + break + + assert matched_sim_obj is not None, obj_inst + self.object_scope[obj_inst] = matched_sim_obj + + def sample(self, kinematic_only=False): + feedback = {"init_success": "yes", "goal_success": "yes", "init_feedback": "", "goal_feedback": ""} + non_sampleable_obj_conditions = [] + sampleable_obj_conditions = [] + + # TODO: currently we assume self.initial_conditions is a list of + # bddl.condition_evaluation.HEAD, each with one child. 
+ # This chid is either a ObjectStateUnaryPredicate/ObjectStateBinaryPredicate or + # a Negation of a ObjectStateUnaryPredicate/ObjectStateBinaryPredicate + for condition in self.initial_conditions: + if not isinstance(condition.children[0], Negation) and not isinstance(condition.children[0], AtomicFormula): + logging.warning(("Skipping over sampling of predicate that is not a negation or an atomic formula")) + continue + if kinematic_only: + if isinstance(condition.children[0], Negation): + if condition.children[0].children[0].STATE_NAME not in KINEMATICS_STATES: + continue + else: + if condition.children[0].STATE_NAME not in KINEMATICS_STATES: + continue + if isinstance(condition.children[0], Negation): + condition = condition.children[0].children[0] + positive = False + else: + condition = condition.children[0] + positive = True + condition_body = set(condition.body) + if len(self.non_sampleable_object_inst.intersection(condition_body)) > 0: + non_sampleable_obj_conditions.append((condition, positive)) + else: + sampleable_obj_conditions.append((condition, positive)) + + # First, try to fulfill the initial conditions that involve non-sampleable objects + # Filter in all simulator objects that allow successful sampling for each object inst + init_sampling_log = [] + scene_object_scope_filtered = {} + for room_type in self.non_sampleable_object_scope: + scene_object_scope_filtered[room_type] = {} + for scene_obj in self.non_sampleable_object_scope[room_type]: + scene_object_scope_filtered[room_type][scene_obj] = {} + for room_inst in self.non_sampleable_object_scope[room_type][scene_obj]: + for obj in self.non_sampleable_object_scope[room_type][scene_obj][room_inst]: + self.object_scope[scene_obj] = obj + + success = True + # If this object is not involved in any initial conditions, + # success will be True by default and any simulator obj will qualify + for condition, positive in non_sampleable_obj_conditions: + # Always skip non-kinematic state sampling. 
Only do so after the object scope has been finalized + if condition.STATE_NAME not in KINEMATICS_STATES: + continue + # Only sample conditions that involve this object + if scene_obj not in condition.body: + continue + success = condition.sample(binary_state=positive) + log_msg = " ".join( + [ + "initial condition sampling", + room_type, + scene_obj, + room_inst, + obj.name, + condition.STATE_NAME, + str(condition.body), + str(success), + ] + ) + logging.warning((log_msg)) + init_sampling_log.append(log_msg) + + if not success: + break + + if not success: + continue + + if room_inst not in scene_object_scope_filtered[room_type][scene_obj]: + scene_object_scope_filtered[room_type][scene_obj][room_inst] = [] + scene_object_scope_filtered[room_type][scene_obj][room_inst].append(obj) + + for room_type in scene_object_scope_filtered: + # For each room_type, filter in room_inst that has successful + # sampling options for all obj_inst in this room_type + room_inst_satisfied = set.intersection( + *[ + set(scene_object_scope_filtered[room_type][obj_inst].keys()) + for obj_inst in scene_object_scope_filtered[room_type] + ] + ) + + if len(room_inst_satisfied) == 0: + error_msg = "Room type [{}] of scene [{}] cannot sample all the objects needed.\nThe following are the possible room instances for each object, the intersection of which is an empty set.\n".format( + room_type, self.scene.scene_id + ) + for obj_inst in scene_object_scope_filtered[room_type]: + error_msg += ( + "{}: ".format(obj_inst) + + ", ".join(scene_object_scope_filtered[room_type][obj_inst].keys()) + + "\n" + ) + error_msg += "The following are the initial condition sampling history:\n" + error_msg += "\n".join(init_sampling_log) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["goal_success"] = "untested" + feedback["init_feedback"] = error_msg + + if self.should_debug_sampling: + self.debug_sampling(scene_object_scope_filtered, non_sampleable_obj_conditions) + return False, feedback + + for obj_inst in scene_object_scope_filtered[room_type]: + scene_object_scope_filtered[room_type][obj_inst] = { + key: val + for key, val in scene_object_scope_filtered[room_type][obj_inst].items() + if key in room_inst_satisfied + } + + # For each room instance, perform maximum bipartite matching between object instance in scope to simulator objects + # Left nodes: a list of object instance in scope + # Right nodes: a list of simulator objects + # Edges: if the simulator object can support the sampling requirement of this object instance + for room_type in scene_object_scope_filtered: + # The same room instances will be shared across all scene obj in a given room type + some_obj = list(scene_object_scope_filtered[room_type].keys())[0] + room_insts = list(scene_object_scope_filtered[room_type][some_obj].keys()) + success = False + init_mbm_log = [] + # Loop through each room instance + for room_inst in room_insts: + graph = nx.Graph() + # For this given room instance, gether mapping from obj instance to a list of simulator obj + obj_inst_to_obj_per_room_inst = {} + for obj_inst in scene_object_scope_filtered[room_type]: + obj_inst_to_obj_per_room_inst[obj_inst] = scene_object_scope_filtered[room_type][obj_inst][ + room_inst + ] + top_nodes = [] + log_msg = "MBM for room instance [{}]".format(room_inst) + logging.warning((log_msg)) + init_mbm_log.append(log_msg) + for obj_inst in obj_inst_to_obj_per_room_inst: + for obj in obj_inst_to_obj_per_room_inst[obj_inst]: + # Create an edge between obj instance and each of the 
simulator obj that supports sampling + graph.add_edge(obj_inst, obj) + log_msg = "Adding edge: {} <-> {}".format(obj_inst, obj.name) + logging.warning((log_msg)) + init_mbm_log.append(log_msg) + top_nodes.append(obj_inst) + # Need to provide top_nodes that contain all nodes in one bipartite node set + # The matches will have two items for each match (e.g. A -> B, B -> A) + matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes) + if len(matches) == 2 * len(obj_inst_to_obj_per_room_inst): + logging.warning(("Object scope finalized:")) + for obj_inst, obj in matches.items(): + if obj_inst in obj_inst_to_obj_per_room_inst: + self.object_scope[obj_inst] = obj + logging.warning((obj_inst, obj.name)) + success = True + break + if not success: + error_msg = "Room type [{}] of scene [{}] do not have enough simulator objects that can successfully sample all the objects needed. This is usually caused by specifying too many object instances in the object scope or the conditions are so stringent that too few simulator objects can satisfy them via sampling.\n".format( + room_type, self.scene.scene_id + ) + error_msg += "The following are the initial condition matching history:\n" + error_msg += "\n".join(init_mbm_log) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["goal_success"] = "untested" + feedback["init_feedback"] = error_msg + return False, feedback + + np.random.shuffle(self.ground_goal_state_options) + logging.warning(("number of ground_goal_state_options", len(self.ground_goal_state_options))) + num_goal_condition_set_to_test = 10 + + goal_sampling_error_msgs = [] + # Next, try to fulfill different set of ground goal conditions (maximum num_goal_condition_set_to_test) + for goal_condition_set in self.ground_goal_state_options[:num_goal_condition_set_to_test]: + goal_condition_set_success = True + goal_sampling_log = [] + # Try to fulfill the current set of ground goal conditions + scene_object_scope_filtered_goal_cond = {} + for room_type in scene_object_scope_filtered: + scene_object_scope_filtered_goal_cond[room_type] = {} + for scene_obj in scene_object_scope_filtered[room_type]: + scene_object_scope_filtered_goal_cond[room_type][scene_obj] = {} + for room_inst in scene_object_scope_filtered[room_type][scene_obj]: + for obj in scene_object_scope_filtered[room_type][scene_obj][room_inst]: + self.object_scope[scene_obj] = obj + + success = True + for goal_condition in goal_condition_set: + goal_condition = goal_condition.children[0] + # do not sample negative goal condition + if isinstance(goal_condition, Negation): + continue + # only sample kinematic goal condition + if goal_condition.STATE_NAME not in KINEMATICS_STATES: + continue + if scene_obj not in goal_condition.body: + continue + success = goal_condition.sample(binary_state=True) + log_msg = " ".join( + [ + "goal condition sampling", + room_type, + scene_obj, + room_inst, + obj.name, + goal_condition.STATE_NAME, + str(goal_condition.body), + str(success), + ] + ) + logging.warning((log_msg)) + goal_sampling_log.append(log_msg) + if not success: + break + if not success: + continue + + if room_inst not in scene_object_scope_filtered_goal_cond[room_type][scene_obj]: + scene_object_scope_filtered_goal_cond[room_type][scene_obj][room_inst] = [] + scene_object_scope_filtered_goal_cond[room_type][scene_obj][room_inst].append(obj) + + for room_type in scene_object_scope_filtered_goal_cond: + # For each room_type, filter in room_inst that has successful + # sampling options for all obj_inst in this 
room_type + room_inst_satisfied = set.intersection( + *[ + set(scene_object_scope_filtered_goal_cond[room_type][obj_inst].keys()) + for obj_inst in scene_object_scope_filtered_goal_cond[room_type] + ] + ) + + if len(room_inst_satisfied) == 0: + error_msg = "Room type [{}] of scene [{}] cannot sample all the objects needed.\nThe following are the possible room instances for each object, the intersection of which is an empty set.\n".format( + room_type, self.scene.scene_id + ) + for obj_inst in scene_object_scope_filtered_goal_cond[room_type]: + error_msg += ( + "{}: ".format(obj_inst) + + ", ".join(scene_object_scope_filtered_goal_cond[room_type][obj_inst].keys()) + + "\n" + ) + error_msg += "The following are the goal condition sampling history:\n" + error_msg += "\n".join(goal_sampling_log) + logging.warning(error_msg) + goal_sampling_error_msgs.append(error_msg) + if self.should_debug_sampling: + self.debug_sampling( + scene_object_scope_filtered_goal_cond, non_sampleable_obj_conditions, goal_condition_set + ) + goal_condition_set_success = False + break + + for obj_inst in scene_object_scope_filtered_goal_cond[room_type]: + scene_object_scope_filtered_goal_cond[room_type][obj_inst] = { + key: val + for key, val in scene_object_scope_filtered_goal_cond[room_type][obj_inst].items() + if key in room_inst_satisfied + } + + if not goal_condition_set_success: + continue + # For each room instance, perform maximum bipartite matching between object instance in scope to simulator objects + # Left nodes: a list of object instance in scope + # Right nodes: a list of simulator objects + # Edges: if the simulator object can support the sampling requirement of ths object instance + for room_type in scene_object_scope_filtered_goal_cond: + # The same room instances will be shared across all scene obj in a given room type + some_obj = list(scene_object_scope_filtered_goal_cond[room_type].keys())[0] + room_insts = list(scene_object_scope_filtered_goal_cond[room_type][some_obj].keys()) + success = False + goal_mbm_log = [] + # Loop through each room instance + for room_inst in room_insts: + graph = nx.Graph() + # For this given room instance, gether mapping from obj instance to a list of simulator obj + obj_inst_to_obj_per_room_inst = {} + for obj_inst in scene_object_scope_filtered_goal_cond[room_type]: + obj_inst_to_obj_per_room_inst[obj_inst] = scene_object_scope_filtered_goal_cond[room_type][ + obj_inst + ][room_inst] + top_nodes = [] + log_msg = "MBM for room instance [{}]".format(room_inst) + logging.warning((log_msg)) + goal_mbm_log.append(log_msg) + for obj_inst in obj_inst_to_obj_per_room_inst: + for obj in obj_inst_to_obj_per_room_inst[obj_inst]: + # Create an edge between obj instance and each of the simulator obj that supports sampling + graph.add_edge(obj_inst, obj) + log_msg = "Adding edge: {} <-> {}".format(obj_inst, obj.name) + logging.warning((log_msg)) + goal_mbm_log.append(log_msg) + top_nodes.append(obj_inst) + # Need to provide top_nodes that contain all nodes in one bipartite node set + # The matches will have two items for each match (e.g. 
A -> B, B -> A) + matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes) + if len(matches) == 2 * len(obj_inst_to_obj_per_room_inst): + logging.warning(("Object scope finalized:")) + for obj_inst, obj in matches.items(): + if obj_inst in obj_inst_to_obj_per_room_inst: + self.object_scope[obj_inst] = obj + logging.warning((obj_inst, obj.name)) + success = True + break + if not success: + error_msg = "Room type [{}] of scene [{}] do not have enough simulator objects that can successfully sample all the objects needed. This is usually caused by specifying too many object instances in the object scope or the conditions are so stringent that too few simulator objects can satisfy them via sampling.\n".format( + room_type, self.scene.scene_id + ) + error_msg += "The following are the goal condition matching history:\n" + error_msg += "\n".join(goal_mbm_log) + logging.warning(error_msg) + goal_sampling_error_msgs.append(error_msg) + goal_condition_set_success = False + break + + if not goal_condition_set_success: + continue + + # if one set of goal conditions (and initial conditions) are satisfied, sampling is successful + break + + if not goal_condition_set_success: + goal_sampling_error_msg_compiled = "" + for i, log_msg in enumerate(goal_sampling_error_msgs): + goal_sampling_error_msg_compiled += "-" * 30 + "\n" + goal_sampling_error_msg_compiled += "Ground condition set #{}/{}:\n".format( + i + 1, len(goal_sampling_error_msgs) + ) + goal_sampling_error_msg_compiled += log_msg + "\n" + feedback["goal_success"] = "no" + feedback["goal_feedback"] = goal_sampling_error_msg_compiled + return False, feedback + + # Do sampling again using the object instance -> simulator object mapping from maximum bipartite matching + for condition, positive in non_sampleable_obj_conditions: + num_trials = 10 + for _ in range(num_trials): + success = condition.sample(binary_state=positive) + if success: + break + if not success: + logging.warning( + "Non-sampleable object conditions failed even after successful matching: {}".format(condition.body) + ) + feedback["init_success"] = "no" + feedback["init_feedback"] = "Please run test sampling again." 
+ return False, feedback + + # Use ray casting for ontop and inside sampling for non-sampleable objects + for condition, positive in sampleable_obj_conditions: + if condition.STATE_NAME in ["inside", "ontop"]: + condition.kwargs["use_ray_casting_method"] = True + + if len(self.sampling_orders) > 0: + # Pop non-sampleable objects + self.sampling_orders.pop(0) + for cur_batch in self.sampling_orders: + # First sample non-sliced conditions + for condition, positive in sampleable_obj_conditions: + if condition.STATE_NAME == "sliced": + continue + # Sample conditions that involve the current batch of objects + if condition.body[0] in cur_batch: + num_trials = 100 + for _ in range(num_trials): + success = condition.sample(binary_state=positive) + if success: + break + if not success: + error_msg = "Sampleable object conditions failed: {} {}".format( + condition.STATE_NAME, condition.body + ) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["init_feedback"] = error_msg + return False, feedback + + # Then sample non-sliced conditions + for condition, positive in sampleable_obj_conditions: + if condition.STATE_NAME != "sliced": + continue + # Sample conditions that involve the current batch of objects + if condition.body[0] in cur_batch: + success = condition.sample(binary_state=positive) + if not success: + error_msg = "Sampleable object conditions failed: {}".format(condition.body) + logging.warning(error_msg) + feedback["init_success"] = "no" + feedback["init_feedback"] = error_msg + return False, feedback + + return True, feedback + + def debug_sampling(self, scene_object_scope_filtered, non_sampleable_obj_conditions, goal_condition_set=None): + igibson.debug_sampling = True + for room_type in self.non_sampleable_object_scope: + for scene_obj in self.non_sampleable_object_scope[room_type]: + if len(scene_object_scope_filtered[room_type][scene_obj].keys()) != 0: + continue + for room_inst in self.non_sampleable_object_scope[room_type][scene_obj]: + for obj in self.non_sampleable_object_scope[room_type][scene_obj][room_inst]: + self.object_scope[scene_obj] = obj + + for condition, positive in non_sampleable_obj_conditions: + # Only sample conditions that involve this object + if scene_obj not in condition.body: + continue + print( + "debug initial condition sampling", + room_type, + scene_obj, + room_inst, + obj.name, + condition.STATE_NAME, + condition.body, + ) + obj_pos = obj.get_position() + # Set the pybullet camera to have a bird's eye view + # of the sampling process + p.resetDebugVisualizerCamera( + cameraDistance=3.0, cameraYaw=0, cameraPitch=-89.99999, cameraTargetPosition=obj_pos + ) + success = condition.sample(binary_state=positive) + print("success", success) + + if goal_condition_set is None: + continue + + for goal_condition in goal_condition_set: + goal_condition = goal_condition.children[0] + if isinstance(goal_condition, Negation): + continue + if goal_condition.STATE_NAME not in KINEMATICS_STATES: + continue + if scene_obj not in goal_condition.body: + continue + print( + "goal condition sampling", + room_type, + scene_obj, + room_inst, + obj.name, + goal_condition.STATE_NAME, + goal_condition.body, + ) + obj_pos = obj.get_position() + # Set the pybullet camera to have a bird's eye view + # of the sampling process + p.resetDebugVisualizerCamera( + cameraDistance=3.0, cameraYaw=0, cameraPitch=-89.99999, cameraTargetPosition=obj_pos + ) + success = goal_condition.sample(binary_state=True) + print("success", success) + + def clutter_scene(self): + if not 
self.load_clutter: + return + + scene_id = self.scene.scene_id + clutter_ids = [""] + list(range(2, 5)) + clutter_id = np.random.choice(clutter_ids) + clutter_scene = InteractiveIndoorScene(scene_id, "{}_clutter{}".format(scene_id, clutter_id)) + existing_objects = [value for key, value in self.object_scope.items() if "floor.n.01" not in key] + self.simulator.import_non_colliding_objects( + objects=clutter_scene.objects_by_name, existing_objects=existing_objects, min_distance=0.5 + ) + + def get_task_obs(self, env): + state = OrderedDict() + task_obs = np.zeros((self.task_obs_dim)) + state["robot_pos"] = np.array(env.robots[0].get_position()) + state["robot_orn"] = np.array(env.robots[0].get_rpy()) + + i = 0 + for _, v in self.object_scope.items(): + if isinstance(v, URDFObject): + state["obj_{}_valid".format(i)] = 1.0 + state["obj_{}_pos".format(i)] = np.array(v.get_position()) + state["obj_{}_orn".format(i)] = np.array(p.getEulerFromQuaternion(v.get_orientation())) + grasping_objects = env.robots[0].is_grasping(v.get_body_id()) + for grasp_idx, grasping in enumerate(grasping_objects): + state["obj_{}_pos_in_gripper_{}".format(i, grasp_idx)] = float(grasping) + i += 1 + + state_list = [] + for k, v in state.items(): + if isinstance(v, list): + state_list.extend(v) + elif isinstance(v, tuple): + state_list.extend(list(v)) + elif isinstance(v, np.ndarray): + state_list.extend(list(v)) + elif isinstance(v, (float, int)): + state_list.append(v) + else: + raise ValueError("cannot serialize task obs") + + assert len(state_list) <= len(task_obs) + task_obs[: len(state_list)] = state_list + + return task_obs + + +def main(): + igbhvr_act_inst = iGBEHAVIORActivityInstance("assembling_gift_baskets", 0) + igbhvr_act_inst.initialize_simulator(mode="headless", scene_id="Rs_int") + + for i in range(500): + igbhvr_act_inst.simulator.step() + success, failed_conditions = igbhvr_act_inst.check_success() + print("ACTIVITY SUCCESS:", success) + if not success: + print("FAILED CONDITIONS:", failed_conditions) + igbhvr_act_inst.simulator.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/tasks/bddl_backend.py b/igibson/activity/bddl_backend.py similarity index 100% rename from igibson/tasks/bddl_backend.py rename to igibson/activity/bddl_backend.py diff --git a/igibson/utils/data_utils/ext_object/scripts_wip/get_obj_shelf_heights.py b/igibson/activity/get_obj_shelf_heights.py similarity index 98% rename from igibson/utils/data_utils/ext_object/scripts_wip/get_obj_shelf_heights.py rename to igibson/activity/get_obj_shelf_heights.py index 4a5f0f04c..039a5e15e 100644 --- a/igibson/utils/data_utils/ext_object/scripts_wip/get_obj_shelf_heights.py +++ b/igibson/activity/get_obj_shelf_heights.py @@ -29,8 +29,7 @@ out_dict = {} obj = ArticulatedObject(filename=args.object_file, scale=1.0) - simulator.import_object(obj) - body_id = obj.get_body_id() + body_id = simulator.import_object(obj) aabb = pb.getAABB(body_id) size = [aabb[1][0] - aabb[0][0], aabb[1][1] - aabb[0][1], aabb[1][2] - aabb[0][2]] diff --git a/igibson/utils/data_utils/sampling_task/batch_sampling_saver.py b/igibson/activity/tests/batch_sampling_saver.py similarity index 100% rename from igibson/utils/data_utils/sampling_task/batch_sampling_saver.py rename to igibson/activity/tests/batch_sampling_saver.py diff --git a/igibson/activity/tests/sampling_loader.py b/igibson/activity/tests/sampling_loader.py new file mode 100644 index 000000000..fa07b3878 --- /dev/null +++ b/igibson/activity/tests/sampling_loader.py @@ -0,0 +1,50 @@ 
+import bddl +from IPython import embed + +from igibson.activity.activity_base import iGBEHAVIORActivityInstance +from igibson.simulator import Simulator + +bddl.set_backend("iGibson") + +task_choices = [ + "packing_lunches_filtered", + "assembling_gift_baskets_filtered", + "organizing_school_stuff_filtered", + "re-shelving_library_books_filtered", + "serving_hors_d_oeuvres_filtered", + "putting_away_toys_filtered", + "putting_away_Christmas_decorations_filtered", + "putting_dishes_away_after_cleaning_filtered", + "cleaning_out_drawers_filtered", +] +task = "assembling_gift_baskets" +task_id = 0 +scene = "Rs_int" +num_init = 0 + +igbhvr_act_inst = iGBEHAVIORActivityInstance(task, activity_definition=task_id) +scene_kwargs = { + # 'load_object_categories': ['oven', 'fridge', 'countertop', 'cherry', 'sausage', 'tray'], + "not_load_object_categories": ["ceilings"], + "urdf_file": "{}_task_{}_{}_{}".format(scene, task, task_id, num_init), +} +simulator = Simulator(mode="headless", image_width=960, image_height=720) +init_success = igbhvr_act_inst.initialize_simulator( + scene_id=scene, + simulator=simulator, + load_clutter=True, + should_debug_sampling=False, + scene_kwargs=scene_kwargs, + online_sampling=False, +) +print("success") +embed() + +while True: + igbhvr_act_inst.simulator.step() + success, sorted_conditions = igbhvr_act_inst.check_success() + print("TASK SUCCESS:", success) + if not success: + print("FAILED CONDITIONS:", sorted_conditions["unsatisfied"]) + else: + pass diff --git a/igibson/utils/data_utils/sampling_task/sampling_saver.py b/igibson/activity/tests/sampling_saver.py similarity index 64% rename from igibson/utils/data_utils/sampling_task/sampling_saver.py rename to igibson/activity/tests/sampling_saver.py index 9bb9dc001..ecf93d30a 100644 --- a/igibson/utils/data_utils/sampling_task/sampling_saver.py +++ b/igibson/activity/tests/sampling_saver.py @@ -5,12 +5,14 @@ import bddl import pybullet as p -from IPython import embed import igibson -from igibson.envs.igibson_env import iGibsonEnv +from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.simulator import Simulator -from igibson.utils.utils import parse_config, restoreState + +PARTIAL_RECACHE = { + # 'sorting_books': ['Ihlen_0_int'], +} def parse_args(): @@ -19,7 +21,6 @@ def parse_args(): "--task", type=str, required=True, help="Name of ATUS task matching BDDL parent folder in bddl." ) parser.add_argument("--task_id", type=int, required=True, help="BDDL integer ID, matching suffix of bddl.") - parser.add_argument("--scenes", type=str, nargs="+", help="A list of scenes to sample the BDDL description.") parser.add_argument("--max_trials", type=int, default=1, help="Maximum number of trials to try sampling.") parser.add_argument( "--num_initializations", type=int, default=1, help="Number of initialization per BDDL per scene." 
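Note that sampling_saver.py and sampling_loader.py above share one naming convention: a successfully sampled activity instance is cached as a scene URDF named "{scene_id}_task_{task}_{task_id}_{init_id}" under the scene's urdf directory, and the loader points scene_kwargs["urdf_file"] at that same name with online_sampling=False. A minimal sketch of that convention, assuming only the path layout visible in the two scripts (the helper name cached_task_urdf_path is illustrative and not part of iGibson):

import os

import igibson

def cached_task_urdf_path(scene_id, task, task_id, init_id):
    # Mirrors the format string used by sampling_saver.py / sampling_loader.py above.
    urdf_name = "{}_task_{}_{}_{}".format(scene_id, task, task_id, init_id)
    return os.path.join(igibson.ig_dataset_path, "scenes", scene_id, "urdf", urdf_name + ".urdf")

# e.g. the instance that sampling_loader.py loads in its default configuration:
print(cached_task_urdf_path("Rs_int", "assembling_gift_baskets", 0, 0))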
@@ -28,11 +29,19 @@ def parse_args(): return parser.parse_args() +def remove_newly_added_objects(igbhvr_act_inst, state_id): + for sim_obj in igbhvr_act_inst.newly_added_objects: + igbhvr_act_inst.scene.remove_object(sim_obj) + for id in sim_obj.body_ids: + p.removeBody(id) + p.restoreState(state_id) + + def main(): args = parse_args() + bddl.set_backend("iGibson") task = args.task task_id = args.task_id - scenes = args.scenes start_initialization = args.start_initialization logging.warning("TASK: {}".format(task)) logging.warning("TASK ID: {}".format(task_id)) @@ -42,53 +51,48 @@ def main(): with open(scene_json) as f: activity_to_scenes = json.load(f) - if scenes is not None: - scene_choices = scenes - elif task in activity_to_scenes: - scene_choices = activity_to_scenes[task] - else: - scene_choices = [item for item in get_available_ig_scenes() if item.endswith("_int")] + if task not in activity_to_scenes: + return + + scene_choices = activity_to_scenes[task] + if task in PARTIAL_RECACHE: + scene_choices = PARTIAL_RECACHE[task] + # scene_choices = ['Rs_int'] logging.warning(("SCENE CHOICES", scene_choices)) num_initializations = args.num_initializations num_trials = args.max_trials - - config_file = os.path.join(igibson.example_config_path, "behavior_vr.yaml") - env_config = parse_config(config_file) - env_config["task"] = task - env_config["task_id"] = task_id - env_config["online_sampling"] = True - env_config["load_clutter"] = True - + simulator = Simulator(mode="headless", image_width=960, image_height=720, device_idx=0) + scene_kwargs = {} + igbhvr_act_inst = iGBEHAVIORActivityInstance(task, activity_definition=task_id) for scene_id in scene_choices: logging.warning(("TRY SCENE:", scene_id)) - env_config["scene_id"] = scene_id + for init_id in range(start_initialization, start_initialization + num_initializations): urdf_path = "{}_task_{}_{}_{}".format(scene_id, task, task_id, init_id) full_path = os.path.join(igibson.ig_dataset_path, "scenes", scene_id, "urdf", urdf_path + ".urdf") if os.path.isfile(full_path): logging.warning("Already cached: {}".format(full_path)) continue - for _ in range(num_trials): - env = iGibsonEnv( - config_file=env_config, + success = igbhvr_act_inst.initialize_simulator( + simulator=simulator, + scene_id=scene_id, mode="headless", + load_clutter=True, + should_debug_sampling=False, + scene_kwargs=scene_kwargs, + online_sampling=True, ) - success = env.task.initialized if success: break - else: - env.close() if success: sim_obj_to_bddl_obj = { - value.name: {"object_scope": key} for key, value in env.task.object_scope.items() + value.name: {"object_scope": key} for key, value in igbhvr_act_inst.object_scope.items() } - env.scene.save_modified_urdf(urdf_path, sim_obj_to_bddl_obj) + igbhvr_act_inst.scene.save_modified_urdf(urdf_path, sim_obj_to_bddl_obj) logging.warning(("Saved:", urdf_path)) - env.close() - embed() if __name__ == "__main__": diff --git a/igibson/activity/tests/sampling_test.py b/igibson/activity/tests/sampling_test.py new file mode 100644 index 000000000..b4d7764a9 --- /dev/null +++ b/igibson/activity/tests/sampling_test.py @@ -0,0 +1,33 @@ +import bddl +from IPython import embed + +from igibson.activity.activity_base import iGBEHAVIORActivityInstance +from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings +from igibson.simulator import Simulator + +bddl.set_backend("iGibson") + +activity = "assembling_gift_baskets" +scene_id = "Rs_int" + +igbhvr_act_inst = iGBEHAVIORActivityInstance(activity, 
activity_definition=0) +scene_kwargs = { + "not_load_object_categories": ["ceilings"], +} +settings = MeshRendererSettings(texture_scale=1) +simulator = Simulator(mode="headless", image_width=960, image_height=720, rendering_settings=settings) +init_success = igbhvr_act_inst.initialize_simulator( + scene_id=scene_id, simulator=simulator, load_clutter=False, should_debug_sampling=True, scene_kwargs=scene_kwargs +) +assert init_success +print("success") +embed() + +while True: + igbhvr_act_inst.simulator.step() + success, sorted_conditions = igbhvr_act_inst.check_success() + print("TASK SUCCESS:", success) + if not success: + print("FAILED CONDITIONS:", sorted_conditions["unsatisfied"]) + else: + pass diff --git a/igibson/challenge/behavior_challenge.py b/igibson/challenge/behavior_challenge.py new file mode 100644 index 000000000..4066d662a --- /dev/null +++ b/igibson/challenge/behavior_challenge.py @@ -0,0 +1,189 @@ +import json +import logging +import os +import shutil +from collections import defaultdict + +import bddl +import numpy as np + +import igibson +from igibson.envs.behavior_env import BehaviorEnv +from igibson.metrics.agent import BehaviorRobotMetric +from igibson.metrics.disarrangement import KinematicDisarrangement, LogicalDisarrangement +from igibson.metrics.task import TaskMetric +from igibson.utils.utils import parse_config + +logging.getLogger().setLevel(logging.WARNING) + + +def get_metrics_callbacks(): + metrics = [ + KinematicDisarrangement(), + LogicalDisarrangement(), + BehaviorRobotMetric(), + TaskMetric(), + ] + + return ( + [metric.start_callback for metric in metrics], + [metric.step_callback for metric in metrics], + [metric.end_callback for metric in metrics], + [metric.gather_results for metric in metrics], + ) + + +class BehaviorChallenge(object): + def __init__(self): + self.config_file = os.environ["CONFIG_FILE"] + self.split = os.environ["SPLIT"] + self.output_dir = os.environ["OUTPUT_DIR"] + + def submit(self, agent): + env_config = parse_config(self.config_file) + + if self.split == "minival": + # Only eval one activity in the config file + tasks = [env_config["task"]] + else: + # Eval all 100 activities + tasks = sorted( + [ + item + for item in os.listdir(os.path.join(os.path.dirname(bddl.__file__), "activity_conditions")) + if item != "domain_igibson.bddl" + ] + ) + assert len(tasks) == 100 + + log_path = os.path.join(self.output_dir, "per_episode_metrics.json") + summary_log_path = os.path.join(self.output_dir, "aggregated_metrics.json") + + self_reported_log_path = os.path.join( + self.output_dir, "..", "participant_reported_results", "per_episode_metrics.json" + ) + self_reported_summary_log_path = os.path.join( + self.output_dir, "..", "participant_reported_results", "aggregated_metrics.json" + ) + if os.path.exists(self_reported_log_path): + shutil.copyfile(self_reported_log_path, log_path) + print("Per episode eval results copied from self-reported results %s" % log_path) + with open(self_reported_log_path) as f: + self_reported_log = json.load(f) + assert len(self_reported_log) == len(tasks) * 9 + + if os.path.exists(self_reported_summary_log_path): + shutil.copyfile(self_reported_summary_log_path, summary_log_path) + print("Aggregated eval results copied from self-reported results %s" % summary_log_path) + return + + episode = 0 + per_episode_metrics = {} + scene_json = os.path.join(os.path.dirname(bddl.__file__), "../utils", "activity_to_preselected_scenes.json") + + with open(scene_json) as f: + activity_to_scenes = json.load(f) + + with 
open(os.path.join(igibson.ig_dataset_path, "metadata", "behavior_activity_statistics.json")) as f: + activity_metadata = json.load(f) + + for task in tasks: + assert task in activity_to_scenes + scenes = sorted(set(activity_to_scenes[task])) + num_scenes = len(scenes) + human_demo_mean_step = activity_metadata[task]["mean"] + env_config["max_step"] = human_demo_mean_step * 2 # adjust env_config['max_step'] based on the human + # demonstration: the agent gets 2x the average number of human demonstration steps across all possible scenes + + assert num_scenes <= 3 + + # Evaluate 9 activity instances in the training set for now + if num_scenes == 3: + scene_instance_ids = {scenes[0]: range(3), scenes[1]: range(3), scenes[2]: range(3)} + elif num_scenes == 2: + scene_instance_ids = {scenes[0]: range(4), scenes[1]: range(5)} + else: + scene_instance_ids = {scenes[0]: range(9)} + + for scene_id, instance_ids in scene_instance_ids.items(): + env_config["scene_id"] = scene_id + for instance_id in instance_ids: + env = BehaviorEnv( + config_file=env_config, + mode="all", + action_timestep=1.0 / 30.0, + physics_timestep=1.0 / 120.0, + instance_id=instance_id, + ) + start_callbacks, step_callbacks, end_callbacks, data_callbacks = get_metrics_callbacks() + for callback in start_callbacks: + callback(env.task, None) + agent.reset() + state = env.reset() + while True: + action = agent.act(state) + state, reward, done, info = env.step(action) + for callback in step_callbacks: + callback(env.task, None) + if done: + break + for callback in end_callbacks: + callback(env.task, None) + metrics_summary = {} + for callback in data_callbacks: + metrics_summary.update(callback()) + + metrics_summary["task"] = task + per_episode_metrics[episode] = metrics_summary + episode += 1 + env.close() + + with open(log_path, "w+") as f: + json.dump(per_episode_metrics, f) + print("Per episode eval results saved to %s" % log_path) + + aggregated_metrics = {} + success_score = [] + simulator_time = [] + kinematic_disarrangement = [] + logical_disarrangement = [] + distance_navigated = [] + displacement_of_hands = [] + + task_to_mean_success_score = defaultdict(list) + task_scores = [] + + for episode, metric in per_episode_metrics.items(): + task_to_mean_success_score[metric["task"]].append(metric["q_score"]["timestep"][-1]) + + for task, scores in task_to_mean_success_score.items(): + task_scores.append(np.mean(scores)) + + task_scores = sorted(task_scores, reverse=True) + + for episode, metric in per_episode_metrics.items(): + success_score.append(metric["q_score"]["timestep"][-1]) + simulator_time.append(metric["time"]["simulator_time"]) + kinematic_disarrangement.append(metric["kinematic_disarrangement"]["relative"]) + logical_disarrangement.append(metric["logical_disarrangement"]["relative"]) + distance_navigated.append(np.sum(metric["agent_distance"]["timestep"]["body"])) + displacement_of_hands.append( + np.sum(metric["grasp_distance"]["timestep"]["left_hand"]) + + np.sum(metric["grasp_distance"]["timestep"]["right_hand"]) + ) + + aggregated_metrics["Success Score"] = np.mean(success_score) + aggregated_metrics["Success Score Top 5"] = np.mean(np.array(task_scores)[:5]) + aggregated_metrics["Simulated Time"] = np.mean(simulator_time) + aggregated_metrics["Kinematic Disarrangement"] = np.mean(kinematic_disarrangement) + aggregated_metrics["Logical Disarrangement"] = np.mean(logical_disarrangement) + aggregated_metrics["Distance Navigated"] = np.mean(distance_navigated) + aggregated_metrics["Displacement of Hands"] = 
np.mean(displacement_of_hands) + with open(summary_log_path, "w+") as f: + json.dump(aggregated_metrics, f) + print("Aggregated eval results saved to %s" % summary_log_path) + + +if __name__ == "__main__": + challenge = BehaviorChallenge() + challenge.submit(None) diff --git a/igibson/configs/behavior_full_observability_fetch.yaml b/igibson/configs/behavior_full_observability_fetch.yaml deleted file mode 100644 index 763a01242..000000000 --- a/igibson/configs/behavior_full_observability_fetch.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# scene -scene: igibson -scene_id: Beechwood_1_int -clutter: false -build_graph: true -load_texture: true -pybullet_load_texture: true -should_open_all_doors: false - -# domain randomization -texture_randomization_freq: null -object_randomization_freq: null - -# robot -robot: - name: Fetch - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: true - rendering_params: null - assisted_grasping_mode: strict - rigid_trunk: false - default_trunk_offset: 0.365 - default_arm_pose: diagonal30 - controller_config: - base: - name: DifferentialDriveController - arm: - name: InverseKinematicsController - kv: 2.0 - gripper: - name: ParallelJawGripperController - mode: binary - camera: - name: JointController - use_delta_commands: False - -# task -task: cleaning_cupboards -task_id: 0 -instance_id: 0 -online_sampling: false - -# discount factor -discount_factor: 0.99 - -# termination condition -max_step: 500 - -# sensor spec -output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs] -# image -fisheye: false -image_width: 128 -image_height: 128 -vertical_fov: 120 -# depth -depth_low: 0.0 -depth_high: 10.0 - -# sensor noise -depth_noise_rate: 0.0 -scan_noise_rate: 0.0 diff --git a/igibson/configs/behavior_onboard_sensing_fetch.yaml b/igibson/configs/behavior_onboard_sensing_fetch.yaml deleted file mode 100644 index d30be756f..000000000 --- a/igibson/configs/behavior_onboard_sensing_fetch.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# scene -scene: igibson -scene_id: Beechwood_1_int -clutter: false -build_graph: true -load_texture: true -pybullet_load_texture: true -should_open_all_doors: false - -# domain randomization -texture_randomization_freq: null -object_randomization_freq: null - -# robot -robot: - name: Fetch - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: true - rendering_params: null - assisted_grasping_mode: null - rigid_trunk: false - default_trunk_offset: 0.365 - default_arm_pose: diagonal30 - controller_config: - base: - name: DifferentialDriveController - arm: - name: InverseKinematicsController - kv: 2.0 - gripper: - name: ParallelJawGripperController - mode: binary - camera: - name: JointController - use_delta_commands: False - -# task -task: cleaning_cupboards -task_id: 0 -instance_id: 0 -online_sampling: false - -# discount factor -discount_factor: 0.99 - -# termination condition -max_step: 500 - -# sensor spec -output: [proprioception, rgb, highlight, depth, seg, ins_seg] -# image -fisheye: false -image_width: 128 -image_height: 128 -vertical_fov: 120 -# depth -depth_low: 0.0 -depth_high: 10.0 - -# sensor noise -depth_noise_rate: 0.0 -scan_noise_rate: 0.0 diff --git a/igibson/configs/behavior_vr.yaml b/igibson/configs/behavior_vr.yaml deleted file mode 100644 index 61ae59f1f..000000000 --- a/igibson/configs/behavior_vr.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# scene -scene: igibson -scene_id: Beechwood_1_int -clutter: false -build_graph: true -load_texture: true 
-pybullet_load_texture: true -should_open_all_doors: false - -# domain randomization -texture_randomization_freq: null -object_randomization_freq: null - -# robot -robot: - name: BehaviorRobot - -# task -task: cleaning_cupboards -task_id: 0 -instance_id: 0 -online_sampling: false -should_activate_behavior_robot: false -should_highlight_task_relevant_objs: false - -# discount factor -discount_factor: 0.99 - -# termination condition -max_step: 500 - -# sensor spec -output: [proprioception] -# image -fisheye: false -image_width: 128 -image_height: 128 -vertical_fov: 120 -# depth -depth_low: 0.0 -depth_high: 10.0 - -# sensor noise -depth_noise_rate: 0.0 -scan_noise_rate: 0.0 diff --git a/igibson/configs/controllers/dd.yaml b/igibson/configs/controllers/dd.yaml deleted file mode 100644 index a109e7944..000000000 --- a/igibson/configs/controllers/dd.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Example Differential Drive control config (shown for base control) -# See igibson/controllers/dd_controller for docstring of arguments -# Arguments below are the arguments that should be specified by external user (other kwargs -# used in constructor are generated automatically at runtime) -robot: - controller_config: - base: - name: DifferentialDriveController - command_input_limits: default - command_output_limits: default \ No newline at end of file diff --git a/igibson/configs/controllers/ik.yaml b/igibson/configs/controllers/ik.yaml deleted file mode 100644 index 0db6cb0bf..000000000 --- a/igibson/configs/controllers/ik.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Example IK config (shown for arm control) -# See igibson/controllers/ik_controller for docstring of arguments -# Arguments below are the arguments that should be specified by external user (other kwargs -# used in constructor are generated automatically at runtime) -robot: - controller_config: - arm: - name: InverseKinematicsController - command_input_limits: default - command_output_limits: - - [-0.2, -0.2, -0.2, -0.5, -0.5, -0.5] - - [0.2, 0.2, 0.2, 0.5, 0.5, 0.5] - kv: 2.0 - mode: pose_delta_ori - smoothing_filter_size: 2 - workspace_pose_limiter: null - joint_range_tolerance: 0.01 \ No newline at end of file diff --git a/igibson/configs/controllers/joint.yaml b/igibson/configs/controllers/joint.yaml deleted file mode 100644 index a967a4c88..000000000 --- a/igibson/configs/controllers/joint.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Example Joint control config (shown for arm control) -# See igibson/controllers/joint_controller for docstring of arguments -# Arguments below are the arguments that should be specified by external user (other kwargs -# used in constructor are generated automatically at runtime) -robot: - controller_config: - arm: - name: JointController - motor_type: velocity - command_input_limits: default - command_output_limits: default - use_delta_commands: false - use_compliant_mode: true \ No newline at end of file diff --git a/igibson/configs/controllers/null_gripper.yaml b/igibson/configs/controllers/null_gripper.yaml deleted file mode 100644 index 845df9023..000000000 --- a/igibson/configs/controllers/null_gripper.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# Example Null Gripper control config (shown for gripper control) -# See igibson/controllers/null_gripper_controller for docstring of arguments -# Arguments below are the arguments that should be specified by external user (other kwargs -# used in constructor are generated automatically at runtime) -robot: - controller_config: - gripper: - name: NullGripperController - # no other args to 
specify (this is a dummy controller) \ No newline at end of file diff --git a/igibson/configs/controllers/parallel_jaw_gripper.yaml b/igibson/configs/controllers/parallel_jaw_gripper.yaml deleted file mode 100644 index 12f9282a2..000000000 --- a/igibson/configs/controllers/parallel_jaw_gripper.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Example Parallel Jaw Gripper control config (shown for arm control) -# See igibson/controllers/parallel_jaw_gripper_controller for docstring of arguments -# Arguments below are the arguments that should be specified by external user (other kwargs -# used in constructor are generated automatically at runtime) -robot: - controller_config: - arm: - name: ParallelJawGripperController - motor_type: position - command_input_limits: default - command_output_limits: default - mode: binary - limit_tolerance: 0.01 \ No newline at end of file diff --git a/igibson/configs/fetch_reaching.yaml b/igibson/configs/fetch_reaching.yaml deleted file mode 100644 index a73f0ca36..000000000 --- a/igibson/configs/fetch_reaching.yaml +++ /dev/null @@ -1,97 +0,0 @@ -# scene -scene: igibson -scene_id: Rs_int -build_graph: true -load_texture: true -pybullet_load_texture: true -trav_map_type: no_obj -trav_map_resolution: 0.1 -trav_map_erosion: 3 -should_open_all_doors: true - -# domain randomization -texture_randomization_freq: null -object_randomization_freq: null - -# robot -robot: - name: Fetch - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: true - rendering_params: null - assisted_grasping_mode: null - rigid_trunk: false - default_trunk_offset: 0.365 - default_arm_pose: diagonal30 - controller_config: - base: - name: DifferentialDriveController - arm: - name: InverseKinematicsController - kv: 2.0 - gripper: - name: ParallelJawGripperController - mode: binary - camera: - name: JointController - use_delta_commands: False - -# task -task: reaching_random -target_dist_min: 1.0 -target_dist_max: 10.0 -goal_format: polar -task_obs_dim: 4 - -# reward -reward_type: l2 -success_reward: 10.0 -potential_reward_weight: 1.0 -collision_reward_weight: -0.1 - -# discount factor -discount_factor: 0.99 - -# termination condition -dist_tol: 0.36 # body width -max_step: 500 -max_collisions_allowed: 500 - -# misc config -initial_pos_z_offset: 0.1 -collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links - -# sensor spec -output: [task_obs, rgb, depth, scan] -# image -# Primesense Carmine 1.09 short-range RGBD sensor -# http://xtionprolive.com/primesense-carmine-1.09 -fisheye: false -image_width: 128 -image_height: 128 -vertical_fov: 90 -# depth -depth_low: 0.35 -depth_high: 3.0 -# scan -# SICK TIM571 scanning range finder -# https://docs.fetchrobotics.com/robot_hardware.html -# n_horizontal_rays is originally 661, sub-sampled 1/3 -n_horizontal_rays: 220 -n_vertical_beams: 1 -laser_linear_range: 25.0 -laser_angular_range: 220.0 -min_laser_dist: 0.05 -laser_link_name: laser_link - -# sensor noise -depth_noise_rate: 0.0 -scan_noise_rate: 0.0 - -# visual objects -visible_target: true -visible_path: false - diff --git a/igibson/configs/robots/ant.yaml b/igibson/configs/robots/ant.yaml deleted file mode 100644 index a95021661..000000000 --- a/igibson/configs/robots/ant.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Example Ant config -robot: - name: Ant - action_type: continuous - action_normalize: true - proprio_obs: - - joint_qpos - base_name: null - scale: 1.0 - self_collision: false - rendering_params: null - controller_config: - base: - 
name: JointController \ No newline at end of file diff --git a/igibson/configs/robots/fetch.yaml b/igibson/configs/robots/fetch.yaml deleted file mode 100644 index 70002a46e..000000000 --- a/igibson/configs/robots/fetch.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Example Fetch config -robot: - name: Fetch - action_type: continuous - action_normalize: true - proprio_obs: - - eef_pos - - eef_quat - - trunk_qpos - - arm_qpos_sin - - arm_qpos_cos - - gripper_qpos - - grasp - base_name: null - scale: 1.0 - self_collision: true - rendering_params: null - assisted_grasping_mode: null - rigid_trunk: false - default_trunk_offset: 0.365 - default_arm_pose: vertical - controller_config: - base: - name: DifferentialDriveController - arm: - name: InverseKinematicsController - gripper: - name: ParallelJawGripperController - camera: - name: JointController \ No newline at end of file diff --git a/igibson/configs/robots/freight.yaml b/igibson/configs/robots/freight.yaml deleted file mode 100644 index c264fc3ac..000000000 --- a/igibson/configs/robots/freight.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Example Freight config -robot: - name: Freight - action_type: continuous - action_normalize: true - proprio_obs: - - dd_base_lin_vel - - dd_base_ang_vel - base_name: null - scale: 1.0 - self_collision: false - rendering_params: null - controller_config: - base: - name: DifferentialDriveController \ No newline at end of file diff --git a/igibson/configs/robots/husky.yaml b/igibson/configs/robots/husky.yaml deleted file mode 100644 index 77b77201b..000000000 --- a/igibson/configs/robots/husky.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Example Husky config -robot: - name: Husky - action_type: continuous - action_normalize: true - proprio_obs: - - joint_qpos - - joint_qvel - base_name: null - scale: 1.0 - self_collision: false - rendering_params: null - controller_config: - base: - name: JointController \ No newline at end of file diff --git a/igibson/configs/robots/jr2.yaml b/igibson/configs/robots/jr2.yaml deleted file mode 100644 index 2fa63e554..000000000 --- a/igibson/configs/robots/jr2.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Example JR2 config -robot: - name: JR2 - action_type: continuous - action_normalize: true - proprio_obs: - - eef_pos - - eef_quat - - arm_qpos_sin - - arm_qpos_cos - base_name: null - scale: 1.0 - self_collision: false - rendering_params: null - assisted_grasping_mode: null - controller_config: - base: - name: DifferentialDriveController - arm: - name: InverseKinematicsController - gripper: - name: NullGripperController \ No newline at end of file diff --git a/igibson/configs/robots/locobot.yaml b/igibson/configs/robots/locobot.yaml deleted file mode 100644 index 58806ffd9..000000000 --- a/igibson/configs/robots/locobot.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Example Locobot config -robot: - name: Locobot - action_type: continuous - action_normalize: true - proprio_obs: - - dd_base_lin_vel - - dd_base_ang_vel - base_name: null - scale: 1.0 - self_collision: false - rendering_params: null - controller_config: - base: - name: DifferentialDriveController \ No newline at end of file diff --git a/igibson/configs/robots/turtlebot.yaml b/igibson/configs/robots/turtlebot.yaml deleted file mode 100644 index 03394462b..000000000 --- a/igibson/configs/robots/turtlebot.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Example Turtlebot config -robot: - name: Turtlebot - action_type: continuous - action_normalize: true - proprio_obs: - - dd_base_lin_vel - - dd_base_ang_vel - base_name: null - scale: 1.0 - self_collision: true 
- rendering_params: null - controller_config: - base: - name: DifferentialDriveController \ No newline at end of file diff --git a/igibson/controllers/__init__.py b/igibson/controllers/__init__.py deleted file mode 100644 index 4367295c5..000000000 --- a/igibson/controllers/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -from igibson.controllers.controller_base import ( - REGISTERED_CONTROLLERS, - REGISTERED_LOCOMOTION_CONTROLLERS, - REGISTERED_MANIPULATION_CONTROLLERS, - ControlType, - LocomotionController, - ManipulationController, -) -from igibson.controllers.dd_controller import DifferentialDriveController -from igibson.controllers.ik_controller import InverseKinematicsController -from igibson.controllers.joint_controller import JointController -from igibson.controllers.null_gripper_controller import NullGripperController -from igibson.controllers.parallel_jaw_gripper_controller import ParallelJawGripperController -from igibson.utils.python_utils import assert_valid_key, extract_class_init_kwargs_from_dict - - -def create_controller(name, **kwargs): - """ - Creates a controller of type @name with corresponding necessary keyword arguments @kwargs - - :param name: str, type of controller to use (e.g. JointController, InverseKinematicsController, etc.) - :param kwargs: Any relevant keyword arguments to pass to the controller - - :return Controller: created controller - """ - assert_valid_key(key=name, valid_keys=REGISTERED_CONTROLLERS, name="controller") - controller_cls = REGISTERED_CONTROLLERS[name] - - return controller_cls(**kwargs) diff --git a/igibson/controllers/controller_base.py b/igibson/controllers/controller_base.py deleted file mode 100644 index 45e695c57..000000000 --- a/igibson/controllers/controller_base.py +++ /dev/null @@ -1,314 +0,0 @@ -from collections import Iterable - -import numpy as np - -from igibson.utils.python_utils import assert_valid_key - -# Global dicts that will contain mappings -REGISTERED_CONTROLLERS = {} -REGISTERED_LOCOMOTION_CONTROLLERS = {} -REGISTERED_MANIPULATION_CONTROLLERS = {} - - -def register_controller(cls): - if cls.__name__ not in REGISTERED_CONTROLLERS: - REGISTERED_CONTROLLERS[cls.__name__] = cls - - -def register_locomotion_controller(cls): - if cls.__name__ not in REGISTERED_LOCOMOTION_CONTROLLERS: - REGISTERED_LOCOMOTION_CONTROLLERS[cls.__name__] = cls - - -def register_manipulation_controller(cls): - if cls.__name__ not in REGISTERED_MANIPULATION_CONTROLLERS: - REGISTERED_MANIPULATION_CONTROLLERS[cls.__name__] = cls - - -# Define macros -class ControlType: - POSITION = 0 - VELOCITY = 1 - TORQUE = 2 - _MAPPING = { - "position": POSITION, - "velocity": VELOCITY, - "torque": TORQUE, - } - VALID_TYPES = set(_MAPPING.values()) - VALID_TYPES_STR = set(_MAPPING.keys()) - - @classmethod - def get_type(cls, type_str): - """ - :param type_str: One of "position", "velocity", or "torque" (any case), and maps it - to the corresponding type - - :return ControlType: control type corresponding to the associated string - """ - assert_valid_key(key=type_str.lower(), valid_keys=cls._MAPPING, name="control type") - return cls._MAPPING[type_str.lower()] - - -class BaseController: - """ - An abstract class with interface for mapping specific types of commands to deployable control signals. - """ - - def __init_subclass__(cls, **kwargs): - """ - Registers all subclasses as part of this registry. This is useful to decouple internal codebase from external - user additions. 
This way, users can add their custom controller by simply extending this Controller class, - and it will automatically be registered internally. This allows users to then specify their controller - directly in string-from in e.g., their config files, without having to manually set the str-to-class mapping - in our code. - """ - register_controller(cls) - - def __init__( - self, - control_freq, - control_limits, - joint_idx, - command_input_limits="default", - command_output_limits="default", - ): - """ - :param control_freq: int, controller loop frequency - :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted - control signal. Should specify per-actuator type limits, i.e.: - - "position": [[min], [max]] - "velocity": [[min], [max]] - "torque": [[min], [max]] - "has_limit": [...bool...] - - Values outside of this range will be clipped, if the corresponding joint index in has_limit is True. - :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring - controller-relevant values during control computations - :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], - if set, is the min/max acceptable inputted command. Values outside of this range will be clipped. - If None, no clipping will be used. If "default", range will be set to (-1, 1) - :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set, - is the min/max scaled command. If both this value and @command_input_limits is not None, - then all inputted command values will be scaled from the input range to the output range. - If either is None, no scaling will be used. If "default", then this range will automatically be set - to the @control_limits entry corresponding to self.control_type - """ - # Store arguments - self.control_freq = control_freq - self.control_limits = {} - for motor_type in {"position", "velocity", "torque"}: - assert ( - motor_type in control_limits - ), "Expected motor_type {} to be in control_limits, but does not exist.".format(motor_type) - self.control_limits[ControlType.get_type(motor_type)] = [ - np.array(control_limits[motor_type][0]), - np.array(control_limits[motor_type][1]), - ] - assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist." 
- self.joint_has_limits = control_limits["has_limit"] - self._joint_idx = joint_idx - - # Initialize some other variables that will be filled in during runtime - self.control = None - self._command = None - self._command_scale_factor = None - self._command_output_transform = None - self._command_input_transform = None - - # Standardize command input / output limits to be (min_array, max_array) - command_input_limits = (-1.0, 1.0) if command_input_limits == "default" else command_input_limits - command_output_limits = ( - ( - np.array(self.control_limits[self.control_type][0])[self.joint_idx], - np.array(self.control_limits[self.control_type][1])[self.joint_idx], - ) - if command_output_limits == "default" - else command_output_limits - ) - self.command_input_limits = ( - None - if command_input_limits is None - else ( - self.nums2array(command_input_limits[0], self.command_dim), - self.nums2array(command_input_limits[1], self.command_dim), - ) - ) - self.command_output_limits = ( - None - if command_output_limits is None - else ( - self.nums2array(command_output_limits[0], self.command_dim), - self.nums2array(command_output_limits[1], self.command_dim), - ) - ) - - def _preprocess_command(self, command): - """ - Clips + scales inputted @command according to self.command_input_limits and self.command_output_limits. - If self.command_input_limits is None, then no clipping will occur. If either self.command_input_limits - or self.command_output_limits is None, then no scaling will occur. - - :param command: Array[float] or float, Inputted command vector - :return Array[float]: Processed command vector - """ - # Make sure command is a np.array - command = np.array([command]) if type(command) in {int, float} else np.array(command) - # We only clip and / or scale if self.command_input_limits exists - if self.command_input_limits is not None: - # Clip - command = command.clip(*self.command_input_limits) - if self.command_output_limits is not None: - # If we haven't calculated how to scale the command, do that now (once) - if self._command_scale_factor is None: - self._command_scale_factor = abs( - self.command_output_limits[1] - self.command_output_limits[0] - ) / abs(self.command_input_limits[1] - self.command_input_limits[0]) - self._command_output_transform = ( - self.command_output_limits[1] + self.command_output_limits[0] - ) / 2.0 - self._command_input_transform = (self.command_input_limits[1] + self.command_input_limits[0]) / 2.0 - # Scale command - command = ( - command - self._command_input_transform - ) * self._command_scale_factor + self._command_output_transform - - # Return processed command - return command - - def update_command(self, command): - """ - Updates inputted @command internally. - - :param command: Array[float], inputted command to store internally in this controller - """ - # Sanity check the command - assert len(command) == self.command_dim, "Commands must be dimension {}, got dim {} instead.".format( - self.command_dim, len(command) - ) - # Preprocess and store inputted command - self._command = self._preprocess_command(np.array(command)) - - def clip_control(self, control): - """ - Clips the inputted @control signal based on @control_limits. 
- - :param control: Array[float], control signal to clip - - :return Array[float]: Clipped control signal - """ - clipped_control = control.clip( - self.control_limits[self.control_type][0][self.joint_idx], - self.control_limits[self.control_type][1][self.joint_idx], - ) - idx = ( - self.joint_has_limits[self.joint_idx] - if self.control_type == ControlType.POSITION - else [True] * self.control_dim - ) - control[idx] = clipped_control[idx] - return control - - def step(self, control_dict): - """ - Take a controller step. - - :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped - states necessary for controller computation - - :return Array[float]: numpy array of outputted control signals - """ - control = self._command_to_control(command=self._command, control_dict=control_dict) - self.control = self.clip_control(control=control) - return self.control - - def reset(self): - """ - Resets this controller. Should be implemented by subclass. - """ - raise NotImplementedError - - def _command_to_control(self, command, control_dict): - """ - Converts the (already preprocessed) inputted @command into deployable (non-clipped!) control signal. - Should be implemented by subclass. - - :param command: Array[float], desired (already preprocessed) command to convert into control signals - :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped - states necessary for controller computation - - :return: Array[float], outputted (non-clipped!) control signal to deploy - """ - raise NotImplementedError - - @staticmethod - def nums2array(nums, dim): - """ - Convert input @nums into numpy array of length @dim. If @nums is a single number, broadcasts it to the - corresponding dimension size @dim before converting into a numpy array - Args: - nums (numeric or Iterable): Either single value or array of numbers - dim (int): Size of array to broadcast input to env.sim.data.actuator_force - Returns: - np.array: Array filled with values specified in @nums - """ - # First run sanity check to make sure no strings are being inputted - if isinstance(nums, str): - raise TypeError("Error: Only numeric inputs are supported for this function, nums2array!") - - # Check if input is an Iterable, if so, we simply convert the input to np.array and return - # Else, input is a single value, so we map to a numpy array of correct size and return - return np.array(nums) if isinstance(nums, Iterable) else np.ones(dim) * nums - - @property - def control_type(self): - """ - :return ControlType: Type of control returned by this controller - """ - raise NotImplementedError - - @property - def command_dim(self): - """ - :return int: Expected size of inputted commands - """ - raise NotImplementedError - - @property - def control_dim(self): - """ - :return int: Expected size of outputted controls - """ - return len(self.joint_idx) - - @property - def joint_idx(self): - """ - :return Array[int]: Joint indices corresponding to the specific joints being controlled by this robot - """ - return np.array(self._joint_idx) - - -class LocomotionController(BaseController): - """ - Controller to control locomotion. All implemented controllers that encompass locomotion capabilities should extend - from this class. 
- """ - - def __init_subclass__(cls, **kwargs): - # Register as part of locomotion controllers - super().__init_subclass__(**kwargs) - register_locomotion_controller(cls) - - -class ManipulationController(BaseController): - """ - Controller to control manipulation. All implemented controllers that encompass manipulation capabilities - should extend from this class. - """ - - def __init_subclass__(cls, **kwargs): - # Register as part of locomotion controllers - super().__init_subclass__(**kwargs) - register_manipulation_controller(cls) diff --git a/igibson/controllers/dd_controller.py b/igibson/controllers/dd_controller.py deleted file mode 100644 index 5e0436f82..000000000 --- a/igibson/controllers/dd_controller.py +++ /dev/null @@ -1,116 +0,0 @@ -import numpy as np - -from igibson.controllers import ControlType, LocomotionController - - -class DifferentialDriveController(LocomotionController): - """ - Differential drive (DD) controller for controlling two independently controlled wheeled joints. - - Each controller step consists of the following: - 1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits - 2. Convert desired (lin_vel, ang_vel) command into (left, right) wheel joint velocity control signals - 3. Clips the resulting command by the joint velocity limits - """ - - def __init__( - self, - wheel_radius, - wheel_axle_length, - control_freq, - control_limits, - joint_idx, - command_input_limits="default", - command_output_limits="default", - ): - """ - :param wheel_radius: float, radius of the wheels (both assumed to be same radius) - :param wheel_axle_length: float, perpendicular distance between the two wheels - :param control_freq: int, controller loop frequency - :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted - control signal. Should specify per-actuator type limits, i.e.: - - "position": [[min], [max]] - "velocity": [[min], [max]] - "torque": [[min], [max]] - "has_limit": [...bool...] - - Values outside of this range will be clipped, if the corresponding joint index in has_limit is True. - Assumes order is [left_wheel, right_wheel] for each set of values. - :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring - controller-relevant values during control computations - :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], - if set, is the min/max acceptable inputted command. Values outside of this range will be clipped. - If None, no clipping will be used. If "default", range will be set to (-1, 1) - :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set, - is the min/max scaled command. If both this value and @command_input_limits is not None, - then all inputted command values will be scaled from the input range to the output range. - If either is None, no scaling will be used. 
If "default", then this range will automatically be set - to the maximum linear and angular velocities calculated from @wheel_radius, @wheel_axle_length, and - @control_limits velocity limits entry - """ - # Store internal variables - self.wheel_radius = wheel_radius - self.wheel_axle_halflength = wheel_axle_length / 2.0 - - # If we're using default command output limits, map this to maximum linear / angular velocities - if command_output_limits == "default": - min_vels = control_limits["velocity"][0][joint_idx] - assert ( - min_vels[0] == min_vels[1] - ), "Differential drive requires both wheel joints to have same min velocities!" - max_vels = control_limits["velocity"][1][joint_idx] - assert ( - max_vels[0] == max_vels[1] - ), "Differential drive requires both wheel joints to have same max velocities!" - assert abs(min_vels[0]) == abs( - max_vels[0] - ), "Differential drive requires both wheel joints to have same min and max absolute velocities!" - max_lin_vel = max_vels[0] * wheel_radius - max_ang_vel = max_lin_vel * 2.0 / wheel_axle_length - command_output_limits = ((-max_lin_vel, -max_ang_vel), (max_lin_vel, max_ang_vel)) - - # Run super init - super().__init__( - control_freq=control_freq, - control_limits=control_limits, - joint_idx=joint_idx, - command_input_limits=command_input_limits, - command_output_limits=command_output_limits, - ) - - def reset(self): - # No-op - pass - - def _command_to_control(self, command, control_dict): - """ - Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal. - This processes converts the desired (lin_vel, ang_vel) command into (left, right) wheel joint velocity control - signals. - - :param command: Array[float], desired (already preprocessed) 2D command to convert into control signals - Consists of desired (lin_vel, ang_vel) of the controlled body - :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped - states necessary for controller computation. Must include the following keys: - - :return: Array[float], outputted (non-clipped!) 
velocity control signal to deploy - to the [left, right] wheel joints - """ - lin_vel, ang_vel = command - - # Convert to wheel velocities - left_wheel_joint_vel = (lin_vel - ang_vel * self.wheel_axle_halflength) / self.wheel_radius - right_wheel_joint_vel = (lin_vel + ang_vel * self.wheel_axle_halflength) / self.wheel_radius - - # Return desired velocities - return np.array([left_wheel_joint_vel, right_wheel_joint_vel]) - - @property - def control_type(self): - return ControlType.VELOCITY - - @property - def command_dim(self): - # [lin_vel, ang_vel] - return 2 diff --git a/igibson/controllers/ik_controller.py b/igibson/controllers/ik_controller.py index c244d2d0c..0d48463f1 100644 --- a/igibson/controllers/ik_controller.py +++ b/igibson/controllers/ik_controller.py @@ -2,221 +2,167 @@ import pybullet as p import igibson.utils.transform_utils as T -from igibson.controllers import ControlType, ManipulationController from igibson.utils.filters import MovingAverageFilter # Different modes -IK_MODE_COMMAND_DIMS = { - "pose_absolute_ori": 6, # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates - "pose_delta_ori": 6, # 6DOF (dx,dy,dz,dax,day,daz) control over pose - "position_fixed_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands being kept as fixed initial absolute orientation - "position_compliant_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands automatically being sent as 0s (so can drift over time) +IK_MODES = { + "pose_absolute_ori", # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates + "pose_delta_ori", # 6DOF (dx,dy,dz,dax,day,daz) control over pose + "position_fixed_ori", # 3DOF (dx,dy,dz) control over position, with orientation commands being kept as fixed initial absolute orientation + "position_compliant_ori", # 3DOF (dx,dy,dz) control over position, with orientation commands automatically being sent as 0s (so can drift over time) } -IK_MODES = set(IK_MODE_COMMAND_DIMS.keys()) -class InverseKinematicsController(ManipulationController): +class IKController: """ - Controller class to convert (delta) EEF commands into joint velocities using Inverse Kinematics (IK). + Simple controller class to convert (delta) EEF commands into joint velocities - Each controller step consists of the following: - 1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits - 2. Run Inverse Kinematics to back out joint velocities for a desired task frame command - 3. 
Clips the resulting command by the motor (velocity) limits + Args: + robot (BaseRobot): Robot to control + config (dict): Config associated with this iG setup """ - def __init__( - self, - base_body_id, - task_link_id, - control_freq, - default_joint_pos, - joint_damping, - control_limits, - joint_idx, - command_input_limits="default", - command_output_limits=((-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), (0.2, 0.2, 0.2, 0.5, 0.5, 0.5)), - kv=2.0, - mode="pose_delta_ori", - smoothing_filter_size=None, - workspace_pose_limiter=None, - joint_range_tolerance=0.01, - ): + def __init__(self, robot, config): + # Store internal variables + self.robot = robot + self.config = config + self.input_max = np.array(config["controller"]["input_max"]) + self.input_min = np.array(config["controller"]["input_min"]) + self.output_max = np.array(config["controller"]["output_max"]) + self.output_min = np.array(config["controller"]["output_min"]) + self.action_scale = abs(self.output_max - self.output_min) / abs(self.input_max - self.input_min) + self.action_output_transform = (self.output_max + self.output_min) / 2.0 + self.action_input_transform = (self.input_max + self.input_min) / 2.0 + self.lpf = MovingAverageFilter(obs_dim=len(self.robot.upper_joint_limits), filter_width=2) + + # Set mode and make sure it's valid + self.mode = config["controller"].get("mode", "pose_delta_ori") + assert self.mode in IK_MODES, f"Invalid IK mode specified. Valid options: {IK_MODES}. Got: {self.mode}" + + # Store global limits + self.eef_always_in_frame = config["controller"].get("eef_always_in_frame", False) + self.neutral_xy = config["controller"].get("neutral_xy", [0.25, 0]) + self.radius_limit = config["controller"].get("radius_limit", 0.5) + self.height_limits = config["controller"].get("height_limits", [0.2, 1.5]) + + # Get vertical and horizontal fov + self.vertical_fov = config["vertical_fov"] * np.pi / 180.0 + width, height = config["image_width"], config["image_height"] + self.horizontal_fov = 2 * np.arctan(np.tan(self.vertical_fov / 2.0) * width / height) + + # Create var to store orientation reference (may be necessary based on mode) + self.ori_target = None # quaternion + + def reset(self): """ - :param base_body_id: int, unique pybullet ID corresponding to the pybullet body being controlled by IK - :param task_link_id: int, pybullet link ID corresponding to the link within the body being controlled by IK - :param control_freq: int, controller loop frequency - :param default_joint_pos: Array[float], default joint positions, used as part of nullspace controller in IK - :param joint_damping: Array[float], joint damping parameters associated with each joint - in the body being controlled - :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted - control signal. Should specify per-actuator type limits, i.e.: - - "position": [[min], [max]] - "velocity": [[min], [max]] - "torque": [[min], [max]] - "has_limit": [...bool...] - - Values outside of this range will be clipped, if the corresponding joint index in has_limit is True. - :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring - controller-relevant values during control computations - :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], - if set, is the min/max acceptable inputted command. Values outside of this range will be clipped. - If None, no clipping will be used. 
If "default", range will be set to (-1, 1) - :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set, - is the min/max scaled command. If both this value and @command_input_limits is not None, - then all inputted command values will be scaled from the input range to the output range. - If either is None, no scaling will be used. If "default", then this range will automatically be set - to the @control_limits entry corresponding to self.control_type - :param kv: float, Gain applied to error between IK-commanded joint positions and current joint positions - :param mode: str, mode to use when computing IK. In all cases, position commands are 3DOF delta (dx,dy,dz) - cartesian values, relative to the robot base frame. Valid options are: - - "pose_absolute_ori": 6DOF (dx,dy,dz,ax,ay,az) control over pose, - where the orientation is given in absolute axis-angle coordinates - - "pose_delta_ori": 6DOF (dx,dy,dz,dax,day,daz) control over pose - - "position_fixed_ori": 3DOF (dx,dy,dz) control over position, - with orientation commands being kept as fixed initial absolute orientation - - "position_compliant_ori": 3DOF (dx,dy,dz) control over position, - with orientation commands automatically being sent as 0s (so can drift over time) - :param smoothing_filter_size: None or int, if specified, sets the size of a moving average filter to apply - on all outputted IK joint positions. - :param workspace_pose_limiter: None or function, if specified, callback method that should clip absolute - target (x,y,z) cartesian position and absolute quaternion orientation (x,y,z,w) to a specific workspace - range (i.e.: this can be unique to each robot, and implemented by each embodiment). - Function signature should be: - - def limiter(command_pos: Array[float], command_quat: Array[float], control_dict: Dict[str, Any]) --> Tuple[Array[float], Array[float]] - - where pos_command is (x,y,z) cartesian position values, command_quat is (x,y,z,w) quarternion orientation - values, and the returned tuple is the processed (pos, quat) command. - :param joint_range_tolerance: float, amount to add to each side of the inputted joint range, to improve IK - convergence stability (e.g.: for joint_ranges = 0 for no limits, prevents NaNs from occurring) + Reset this controller """ - # Store arguments - control_dim = len(joint_damping) - self.control_filter = ( - None - if smoothing_filter_size in {None, 0} - else MovingAverageFilter(obs_dim=control_dim, filter_width=smoothing_filter_size) - ) - assert mode in IK_MODES, "Invalid ik mode specified! 
Valid options are: {IK_MODES}, got: {mode}" - self.mode = mode - self.kv = kv - self.workspace_pose_limiter = workspace_pose_limiter - self.base_body_id = base_body_id - self.task_link_id = task_link_id - self.default_joint_pos = np.array(default_joint_pos) - self.joint_damping = np.array(joint_damping) - self.joint_range_tolerance = joint_range_tolerance - - # Other variables that will be filled in at runtime - self._quat_target = None - - # Run super init - super().__init__( - control_freq=control_freq, - control_limits=control_limits, - joint_idx=joint_idx, - command_input_limits=command_input_limits, - command_output_limits=command_output_limits, - ) + self.lpf = MovingAverageFilter(obs_dim=len(self.robot.upper_joint_limits), filter_width=2) + self.ori_target = None - def reset(self): - # Reset the filter and clear internal control state - if self.control_filter is not None: - self.control_filter.reset() - self._quat_target = None + def scale_command(self, command): + """ + Scales the inputted action based on internal limits + + Args: + command (6-array): Inputted raw command - @staticmethod - def _pose_in_base_to_pose_in_world(pose_in_base, base_in_world): + Returns: + 6-array: Scaled command """ - Convert a pose in the base frame to a pose in the world frame. + # Clip command + command = np.clip(command, self.input_min, self.input_max) + return (command - self.action_input_transform) * self.action_scale + self.action_output_transform - :param pose_in_base: Tuple[Array[float], Array[float]], Cartesian xyz position, - quaternion xyzw orientation tuple corresponding to the desired pose in its local base frame - :param base_in_world: Tuple[Array[float], Array[float]], Cartesian xyz position, - quaternion xyzw orientation tuple corresponding to the base pose in the global static frame + def get_current_error(self, current, set_point): + """ + Returns an array of differences between the desired joint positions and current joint positions. + Useful for PID control. + + :param current: the current joint positions + :param set_point: the joint positions that are desired as a numpy array + :return: the current error in the joint positions + """ + error = current - set_point + return error + + def bullet_base_pose_to_world_pose(self, pose_in_base): + """ + Convert a pose in the base frame to a pose in the world frame. - :return Tuple[Array[float], Array[float]]: Cartesian xyz position, - quaternion xyzw orientation tuple corresponding to the desired pose in the global static frame + :param pose_in_base: a (pos, orn) tuple + :return pose_in world: a (pos, orn) tuple """ + pose_in_base_mat = T.pose2mat(pose_in_base) - base_pose_in_world_mat = T.pose2mat(base_in_world) + + base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.robot.robot_ids[0])[0]) + base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.robot.robot_ids[0])[1]) + base_pose_in_world_mat = T.pose2mat((base_pos_in_world, base_orn_in_world)) + pose_in_world_mat = T.pose_in_A_to_pose_in_B(pose_A=pose_in_base_mat, pose_A_in_B=base_pose_in_world_mat) return T.mat2pose(pose_in_world_mat) - def _command_to_control(self, command, control_dict): - """ - Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal. 
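The scale_command helper above clips a raw command to the configured input range and then maps it linearly into the output range. A self-contained sketch of that transform; the limit values below are illustrative assumptions, not taken from any controller config:

import numpy as np

def scale_command(command, input_min, input_max, output_min, output_max):
    # Clip command to [input_min, input_max], then map it linearly onto [output_min, output_max].
    input_min, input_max = np.asarray(input_min, dtype=float), np.asarray(input_max, dtype=float)
    output_min, output_max = np.asarray(output_min, dtype=float), np.asarray(output_max, dtype=float)

    scale = np.abs(output_max - output_min) / np.abs(input_max - input_min)
    input_center = (input_max + input_min) / 2.0
    output_center = (output_max + output_min) / 2.0

    command = np.clip(command, input_min, input_max)
    return (command - input_center) * scale + output_center

# A normalized command in [-1, 1] mapped onto a +/- 5 cm translation range (hypothetical limits).
print(scale_command([0.5, -1.0, 0.0], -1.0, 1.0, -0.05, 0.05))  # -> [0.025, -0.05, 0.0]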
- This processes the command based on self.mode, possibly clips the command based on self.workspace_pose_limiter, - - :param command: Array[float], desired (already preprocessed) command to convert into control signals - Is one of: - (dx,dy,dz) - desired delta cartesian position - (dx,dy,dz,dax,day,daz) - desired delta cartesian position and delta axis-angle orientation - (dx,dy,dz,ax,ay,az) - desired delta cartesian position and global axis-angle orientation - :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped - states necessary for controller computation. Must include the following keys: - joint_position: Array of current joint positions - base_pos: (x,y,z) cartesian position of the robot's base relative to the static global frame - base_quat: (x,y,z,w) quaternion orientation of the robot's base relative to the static global frame - task_pos_relative: (x,y,z) relative cartesian position of the desired task frame to control, computed - in its local frame (e.g.: robot base frame) - task_quat_relative: (x,y,z,w) relative quaternion orientation of the desired task frame to control, - computed in its local frame (e.g.: robot base frame) - - :return: Array[float], outputted (non-clipped!) velocity control signal to deploy + def joint_positions_for_delta_command(self, delta): """ - # Grab important info from control dict - pos_relative = np.array(control_dict["task_pos_relative"]) - quat_relative = np.array(control_dict["task_quat_relative"]) + This function runs inverse kinematics to back out target joint positions + from the inputted delta command. - # The first three values of the command are always the (delta) position, convert to absolute values - dpos = command[:3] - target_pos = pos_relative + dpos + :param delta: a relative pose command defined by (dx, dy, dz, and optionally [dar, dap, day]) + + :return: A list of size @num_joints corresponding to the target joint angles. 
+ """ + # Compute position + dpos = delta[:3] + target_pos = self.robot.get_relative_eef_position() + dpos + + # Clip values if they're past the limits + xy_vec = target_pos[:2] - self.neutral_xy # second value is "neutral" location + d = np.linalg.norm(xy_vec) + target_pos[:2] = (min(self.radius_limit, d) / d) * xy_vec + self.neutral_xy + target_pos[2] = np.clip(target_pos[2], *self.height_limits) + + # Also clip the target pos if we want to keep the eef in frame + if self.eef_always_in_frame: + # Calculate angle from base to eef in xy plane + angle = np.arctan2(target_pos[1], target_pos[0]) + # Clip angle appropriately + angle_clipped = np.clip(angle, -self.horizontal_fov / 2, self.horizontal_fov / 2) + # Project the original vector onto a unit vector pointing in the direction of the clipped angle + unit_xy_angle_clipped = np.array([np.cos(angle_clipped), np.sin(angle_clipped)]) + target_pos[:2] = np.dot(target_pos[:2], unit_xy_angle_clipped) * unit_xy_angle_clipped + + # print(f"target pos: {target_pos}") # Compute orientation if self.mode == "position_fixed_ori": # We need to grab the current robot orientation as the commanded orientation if there is none saved - if self._quat_target is None: - self._quat_target = quat_relative - target_quat = self._quat_target + if self.ori_target is None: + self.ori_target = self.robot.get_relative_eef_orientation() + target_quat = self.ori_target elif self.mode == "position_compliant_ori": # Target quat is simply the current robot orientation - target_quat = quat_relative + target_quat = self.robot.get_relative_eef_orientation() elif self.mode == "pose_absolute_ori": # Received "delta" ori is in fact the desired absolute orientation - target_quat = T.axisangle2quat(command[3:]) + target_quat = T.axisangle2quat(delta[3:]) else: # pose_delta_ori control # Grab dori and compute target ori - dori = T.quat2mat(T.axisangle2quat(command[3:])) - target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative)) - - # Possibly limit to workspace if specified - if self.workspace_pose_limiter is not None: - target_pos, target_quat = self.workspace_pose_limiter(target_pos, target_quat, control_dict) + dori = T.quat2mat(T.axisangle2quat(delta[3:])) + target_quat = T.mat2quat(dori @ T.quat2mat(self.robot.get_relative_eef_orientation())) # Convert to world frame - target_pos, target_quat = self._pose_in_base_to_pose_in_world( - pose_in_base=(target_pos, target_quat), - base_in_world=(np.array(control_dict["base_pos"]), np.array(control_dict["base_quat"])), - ) + target_pos, target_quat = self.bullet_base_pose_to_world_pose((target_pos, target_quat)) # Calculate and return IK-backed out joint angles - joint_targets = self._calc_joint_angles_from_ik(target_pos=target_pos, target_quat=target_quat) - - # Grab the resulting error and scale it by the velocity gain - u = -self.kv * (control_dict["joint_position"] - joint_targets) - - # Return these commanded velocities, (only the relevant joint idx) - return u[self.joint_idx] + return self.calc_joint_angles_from_ik(target_pos=target_pos, target_quat=target_quat) - def _calc_joint_angles_from_ik(self, target_pos, target_quat): + def calc_joint_angles_from_ik(self, target_pos, target_quat): """ Solves for joint angles given the ik target position and orientation Note that this outputs joint angles for the entire pybullet robot body! 
It is the responsibility of the - associated Robot class to filter out the redundant / non-impact joints from the computation + associated BaseRobot class to filter out the redundant / non-impact joints from the computation Args: target_pos (3-array): absolute (x, y, z) eef position command (in robot base frame) @@ -225,35 +171,47 @@ def _calc_joint_angles_from_ik(self, target_pos, target_quat): Returns: n-array: corresponding joint positions to match the inputted targets """ + # Update robot state + self.robot.calc_state() + # Run IK cmd_joint_pos = np.array( p.calculateInverseKinematics( - bodyIndex=self.base_body_id, - endEffectorLinkIndex=self.task_link_id, + bodyIndex=self.robot.robot_ids[0], + endEffectorLinkIndex=self.robot.eef_link_id, targetPosition=target_pos.tolist(), targetOrientation=target_quat.tolist(), - lowerLimits=(self.control_limits[ControlType.POSITION][0] - self.joint_range_tolerance).tolist(), - upperLimits=(self.control_limits[ControlType.POSITION][1] + self.joint_range_tolerance).tolist(), - jointRanges=( - self.control_limits[ControlType.POSITION][1] - - self.control_limits[ControlType.POSITION][0] - + 2 * self.joint_range_tolerance - ).tolist(), - restPoses=self.default_joint_pos.tolist(), - jointDamping=self.joint_damping.tolist(), + lowerLimits=self.robot.lower_joint_limits.tolist(), + upperLimits=self.robot.upper_joint_limits.tolist(), + jointRanges=self.robot.joint_range.tolist(), + restPoses=self.robot.untucked_default_joints.tolist(), + jointDamping=self.robot.joint_damping.tolist(), ) ) - - # Optionally pass through smoothing filter for better stability - if self.control_filter is not None: - cmd_joint_pos = self.control_filter.estimate(cmd_joint_pos) + cmd_joint_pos = self.lpf.estimate(np.array(cmd_joint_pos)) return cmd_joint_pos - @property - def control_type(self): - return ControlType.VELOCITY + def control(self, command): + """ + Execute IK control, given @command. + + Args: + command (6-array): a DELTA relative pose command defined by (dx, dy, dz, dar, dap, day) + + Returns: + n-array: commanded joint velocities to achieve the inputted @command. + """ + # First, scale command + command = self.scale_command(command) + + # Get desired joint positions from IK solver + cmd_joint_pos = self.joint_positions_for_delta_command(delta=command) + + # Grab the resulting error and scale it by the velocity gain + cmd_joint_vel = self.config["controller"]["kv_vel"] * self.get_current_error( + current=self.robot.joint_position, set_point=cmd_joint_pos + ) - @property - def command_dim(self): - return IK_MODE_COMMAND_DIMS[self.mode] + # Return these commanded velocities + return cmd_joint_vel diff --git a/igibson/controllers/joint_controller.py b/igibson/controllers/joint_controller.py deleted file mode 100644 index 5a1d5b70d..000000000 --- a/igibson/controllers/joint_controller.py +++ /dev/null @@ -1,124 +0,0 @@ -import numpy as np - -from igibson.controllers import ControlType, LocomotionController, ManipulationController -from igibson.utils.python_utils import assert_valid_key - - -class JointController(LocomotionController, ManipulationController): - """ - Controller class for joint control. Because pybullet can handle direct position / velocity / torque - control signals, this is merely a pass-through operation from command to control (with clipping / scaling built in). - - Each controller step consists of the following: - 1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits - 2a. 
If using delta commands, then adds the command to the current joint state - 2b. Clips the resulting command by the motor limits - """ - - def __init__( - self, - control_freq, - motor_type, - control_limits, - joint_idx, - command_input_limits="default", - command_output_limits="default", - use_delta_commands=False, - use_compliant_mode=True, - ): - """ - :param control_freq: int, controller loop frequency - :param motor_type: str, type of motor being controlled, one of {position, velocity, torque} - :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted - control signal. Should specify per-actuator type limits, i.e.: - - "position": [[min], [max]] - "velocity": [[min], [max]] - "torque": [[min], [max]] - "has_limit": [...bool...] - - Values outside of this range will be clipped, if the corresponding joint index in has_limit is True. - :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring - controller-relevant values during control computations - :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], - if set, is the min/max acceptable inputted command. Values outside of this range will be clipped. - If None, no clipping will be used. If "default", range will be set to (-1, 1) - :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set, - is the min/max scaled command. If both this value and @command_input_limits is not None, - then all inputted command values will be scaled from the input range to the output range. - If either is None, no scaling will be used. If "default", then this range will automatically be set - to the @control_limits entry corresponding to self.control_type - :param use_delta_commands: bool, whether inputted commands should be interpreted as delta or absolute values - :param use_compliant_mode: bool, only relevant if @use_delta_command is True. If True, will add delta commands - to the current observed joint states. Otherwise, will store initial references - to these values and add the delta values to them. Note that setting this to False can only be used with - "position" motor control, and may be useful for e.g.: setting an initial large delta value, and then sending - subsequent zero commands, which can converge faster than sending individual small delta commands - sequentially. - """ - # Store arguments - assert_valid_key(key=motor_type.lower(), valid_keys=ControlType.VALID_TYPES_STR, name="motor_type") - self.motor_type = motor_type.lower() - self.use_delta_commands = use_delta_commands - self.use_compliant_mode = use_compliant_mode - - # If we're using compliant mode, make sure we're using joint position control (this doesn't make sense for - # velocity or torque control) - if not self.use_compliant_mode: - assert self.motor_type == "position", f"If not using compliant mode, motor control type must be position!" - - # Other variables that will be used at runtime - self._joint_target = None - - # Run super init - super().__init__( - control_freq=control_freq, - control_limits=control_limits, - joint_idx=joint_idx, - command_input_limits=command_input_limits, - command_output_limits=command_output_limits, - ) - - def reset(self): - # Clear the target - self._joint_target = None - - def _command_to_control(self, command, control_dict): - """ - Converts the (already preprocessed) inputted @command into deployable (non-clipped!) 
joint control signal - - :param command: Array[float], desired (already preprocessed) command to convert into control signals - :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped - states necessary for controller computation. Must include the following keys: - joint_position: Array of current joint positions - joint_velocity: Array of current joint velocities - joint_torque: Array of current joint torques - - :return: Array[float], outputted (non-clipped!) control signal to deploy - """ - # If we're using delta commands, add this value - if self.use_delta_commands: - if self.use_compliant_mode: - # Add this to the current observed state - u = control_dict["joint_{}".format(self.motor_type)][self.joint_idx] + command - else: - # Otherwise, we add to the internally stored target state - # (also, initialize the target if we haven't done so already) - if self._joint_target is None: - self._joint_target = np.array(control_dict["joint_{}".format(self.motor_type)][self.joint_idx]) - self._joint_target = self._joint_target + command - u = self._joint_target - # Otherwise, control is simply the command itself - else: - u = command - - # Return control - return u - - @property - def control_type(self): - return ControlType.get_type(type_str=self.motor_type) - - @property - def command_dim(self): - return len(self.joint_idx) diff --git a/igibson/controllers/null_gripper_controller.py b/igibson/controllers/null_gripper_controller.py deleted file mode 100644 index 4b8af716c..000000000 --- a/igibson/controllers/null_gripper_controller.py +++ /dev/null @@ -1,90 +0,0 @@ -import numpy as np - -from igibson.controllers import ControlType, ManipulationController -from igibson.utils.python_utils import assert_valid_key - -VALID_MODES = { - "binary", - "smooth", - "independent", -} - - -class NullGripperController(ManipulationController): - """ - Dummy Controller class for non-prehensile gripper control (i.e.: no control). - This class has a zero-size command space, and returns an empty array for control - """ - - def __init__( - self, - control_freq, - control_limits, - command_input_limits="default", - command_output_limits="default", - ): - """ - :param control_freq: int, controller loop frequency - :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted - control signal. Should specify per-actuator type limits, i.e.: - - "position": [[min], [max]] - "velocity": [[min], [max]] - "torque": [[min], [max]] - "has_limit": [...bool...] - - Values outside of this range will be clipped, if the corresponding joint index in has_limit is True. - :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], - if set, is the min/max acceptable inputted command. Values outside of this range will be clipped. - If None, no clipping will be used. If "default", range will be set to (-1, 1) - :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set, - is the min/max scaled command. If both this value and @command_input_limits is not None, - then all inputted command values will be scaled from the input range to the output range. - If either is None, no scaling will be used. 
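For reference, the JointController deleted above distinguishes two ways of consuming delta commands: compliant mode adds each delta to the currently observed joint state, while non-compliant mode accumulates deltas onto a stored target that is initialized once. A rough plain-numpy sketch of that distinction; the class and method names below are illustrative, not the iGibson API:

import numpy as np

class DeltaTargetTracker:
    # Accumulate position deltas either onto the live joint reading (compliant)
    # or onto an internally stored target (non-compliant).

    def __init__(self, compliant=True):
        self.compliant = compliant
        self._target = None

    def control(self, delta, current_joint_pos):
        delta = np.asarray(delta, dtype=float)
        current_joint_pos = np.asarray(current_joint_pos, dtype=float)
        if self.compliant:
            # Compliant: drift with whatever the joints actually did last step.
            return current_joint_pos + delta
        # Non-compliant: remember where we started and keep integrating commands, so one
        # large delta followed by zero commands keeps pushing toward the same goal.
        if self._target is None:
            self._target = current_joint_pos.copy()
        self._target = self._target + delta
        return self._target

tracker = DeltaTargetTracker(compliant=False)
print(tracker.control([0.3], current_joint_pos=[0.0]))  # -> [0.3]
print(tracker.control([0.0], current_joint_pos=[0.1]))  # still [0.3]: the stored target ignores the lagging joint reading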
If "default", then this range will automatically be set - to the @control_limits entry corresponding to self.control_type - """ - # Immediately run super init - - # Run super init - super().__init__( - control_freq=control_freq, - control_limits=control_limits, - joint_idx=np.array([], dtype=np.int), # no joints controlled - command_input_limits=command_input_limits, - command_output_limits=command_output_limits, - ) - - def reset(self): - # No-op - pass - - def _preprocess_command(self, command): - # No action - return command - - def clip_control(self, control): - # No action - return control - - def _command_to_control(self, command, control_dict): - """ - Converts the (already preprocessed) inputted @command into deployable (non-clipped!) gripper - joint control signal. Since this is a null controller, command should be an empty numpy array - and this function will equivalently return an empty array - - :param command: Array[float], desired (already preprocessed) command to convert into control signals. - This should always be 2D command for each gripper joint (Empty in this case) - :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped - states necessary for controller computation. - - :return: Array[float], outputted (non-clipped!) control signal to deploy. This is an empty np.array - """ - return np.array([]) - - @property - def control_type(self): - return ControlType.POSITION - - @property - def command_dim(self): - return 0 diff --git a/igibson/controllers/parallel_jaw_gripper_controller.py b/igibson/controllers/parallel_jaw_gripper_controller.py deleted file mode 100644 index 7fbe2f9f8..000000000 --- a/igibson/controllers/parallel_jaw_gripper_controller.py +++ /dev/null @@ -1,143 +0,0 @@ -import numpy as np - -from igibson.controllers import ControlType, ManipulationController -from igibson.utils.python_utils import assert_valid_key - -VALID_MODES = { - "binary", - "smooth", - "independent", -} - - -class ParallelJawGripperController(ManipulationController): - """ - Controller class for parallel jaw gripper control. This either interprets an input as a binary - command (open / close), continuous command (open / close with scaled velocities), or per-joint continuous command - - Each controller step consists of the following: - 1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits - 2a. Convert command into gripper joint control signals - 2b. Clips the resulting control by the motor limits - """ - - def __init__( - self, - control_freq, - motor_type, - control_limits, - joint_idx, - command_input_limits="default", - command_output_limits="default", - mode="binary", - limit_tolerance=0.001, - ): - """ - :param control_freq: int, controller loop frequency - :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted - control signal. Should specify per-actuator type limits, i.e.: - - "position": [[min], [max]] - "velocity": [[min], [max]] - "torque": [[min], [max]] - "has_limit": [...bool...] - - Values outside of this range will be clipped, if the corresponding joint index in has_limit is True. - :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring - controller-relevant values during control computations - :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], - if set, is the min/max acceptable inputted command. Values outside of this range will be clipped. 
- If None, no clipping will be used. If "default", range will be set to (-1, 1) - :param command_output_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set, - is the min/max scaled command. If both this value and @command_input_limits is not None, - then all inputted command values will be scaled from the input range to the output range. - If either is None, no scaling will be used. If "default", then this range will automatically be set - to the @control_limits entry corresponding to self.control_type - :param mode: str, mode for this controller. Valid options are: - - "binary": 1D command, if preprocessed value > 0 is interpreted as an max open - (send max pos / vel / tor signal), otherwise send max close control signals - "smooth": 1D command, sends symmetric signal to both finger joints equal to the preprocessed commands - "independent": 2D command, sends independent signals to each finger joint equal to the preprocessed command - :param limit_tolerance: float, sets the tolerance from the joint limit ends, below which controls will be zeroed - out if the control is using velocity or torque control - """ - # Store arguments - assert_valid_key(key=motor_type.lower(), valid_keys=ControlType.VALID_TYPES_STR, name="motor_type") - self.motor_type = motor_type.lower() - assert_valid_key(key=mode, valid_keys=VALID_MODES, name="mode for parallel jaw gripper") - self.mode = mode - self.limit_tolerance = limit_tolerance - - # If we're using binary signal, we override the command output limits - if mode == "binary": - command_output_limits = (-1.0, 1.0) - - # Run super init - super().__init__( - control_freq=control_freq, - control_limits=control_limits, - joint_idx=joint_idx, - command_input_limits=command_input_limits, - command_output_limits=command_output_limits, - ) - - def reset(self): - # No-op - pass - - def _preprocess_command(self, command): - # We extend this method to make sure command is always 2D - if self.mode != "independent": - command = np.array([command] * 2) if type(command) in {int, float} else np.array([command[0]] * 2) - - # Return from super method - return super()._preprocess_command(command=command) - - def _command_to_control(self, command, control_dict): - """ - Converts the (already preprocessed) inputted @command into deployable (non-clipped!) gripper - joint control signal - - :param command: Array[float], desired (already preprocessed) command to convert into control signals. - This should always be 2D command for each gripper joint - :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped - states necessary for controller computation. Must include the following keys: - joint_position: Array of current joint positions - - :return: Array[float], outputted (non-clipped!) 
control signal to deploy - """ - joint_pos = control_dict["joint_position"][self.joint_idx] - # Choose what to do based on control mode - if self.mode == "binary": - # Use max control signal - u = ( - self.control_limits[ControlType.POSITION][1][self.joint_idx] - if command[0] >= 0.0 - else self.control_limits[ControlType.POSITION][0][self.joint_idx] - ) - else: - # Use continuous signal - u = command - - # If we're near the joint limits and we're using velocity / torque control, we zero out the action - if self.motor_type in {"velocity", "torque"}: - violate_upper_limit = ( - joint_pos > self.control_limits[ControlType.POSITION][1][self.joint_idx] - self.limit_tolerance - ) - violate_lower_limit = ( - joint_pos < self.control_limits[ControlType.POSITION][0][self.joint_idx] + self.limit_tolerance - ) - violation = np.logical_or(violate_upper_limit * (u > 0), violate_lower_limit * (u < 0)) - u *= ~violation - - # Return control - return u - - @property - def control_type(self): - return ControlType.get_type(type_str=self.motor_type) - - @property - def command_dim(self): - return 2 if self.mode == "independent" else 1 diff --git a/igibson/envs/behavior_env.py b/igibson/envs/behavior_env.py new file mode 100644 index 000000000..0775385a0 --- /dev/null +++ b/igibson/envs/behavior_env.py @@ -0,0 +1,412 @@ +import argparse +import datetime +import os +import time +from collections import OrderedDict + +import bddl +import gym.spaces +import numpy as np +from bddl.condition_evaluation import evaluate_state + +from igibson.activity.activity_base import iGBEHAVIORActivityInstance +from igibson.envs.igibson_env import iGibsonEnv +from igibson.robots.behavior_robot import BehaviorRobot +from igibson.robots.fetch_gripper_robot import FetchGripper +from igibson.utils.checkpoint_utils import load_checkpoint +from igibson.utils.ig_logging import IGLogWriter +from igibson.objects.articulated_object import URDFObject +from igibson.object_states.on_floor import RoomFloor + + +class BehaviorEnv(iGibsonEnv): + """ + iGibson Environment (OpenAI Gym interface) + """ + + def __init__( + self, + config_file, + scene_id=None, + mode="headless", + action_timestep=1 / 30.0, + physics_timestep=1 / 300.0, + device_idx=0, + render_to_tensor=False, + automatic_reset=False, + seed=0, + action_filter="mobile_manipulation", + instance_id=0, + episode_save_dir=None, + rng=None + ): + """ + :param config_file: config_file path + :param scene_id: override scene_id in config file + :param mode: headless, gui, iggui + :param action_timestep: environment executes action per action_timestep second + :param physics_timestep: physics timestep for pybullet + :param device_idx: which GPU to run the simulation and rendering on + :param render_to_tensor: whether to render directly to pytorch tensors + :param automatic_reset: whether to automatic reset after an episode finishes + """ + self.action_filter = action_filter + self.instance_id = instance_id + super(BehaviorEnv, self).__init__( + config_file=config_file, + scene_id=scene_id, + mode=mode, + action_timestep=action_timestep, + physics_timestep=physics_timestep, + device_idx=device_idx, + render_to_tensor=render_to_tensor, + ) + if rng is None: + self.rng = np.random.default_rng(seed=seed) + else: + self.rng = rng + self.automatic_reset = automatic_reset + self.reward_potential = None + self.episode_save_dir = episode_save_dir + if self.episode_save_dir is not None: + os.makedirs(self.episode_save_dir, exist_ok=True) + self.task_relevant_objects = [ + item + for item in 
self.task.object_scope.values() + if isinstance(item, URDFObject) or isinstance(item, RoomFloor) + ] + self.num_objects = len(self.task_relevant_objects) + + self.log_writer = None + + # Set iggui viewing position to start at the robot's initial position. + robot_init_pos = self.simulator.robots[0].get_position() + if self.simulator.viewer is not None: + self.simulator.viewer.px = robot_init_pos[0] + self.simulator.viewer.py = robot_init_pos[1] + self.simulator.viewer.pz = robot_init_pos[2] + + # Make sure different parallel environments will have different random seeds + # np.random.seed(os.getpid()) + # np.random.seed(0) # For now, force this to be 0 so we have reproducible behavior! + + def load_action_space(self): + """ + Load action space + """ + if isinstance(self.robots[0], BehaviorRobot): + if self.action_filter == "navigation": + self.action_space = gym.spaces.Box(shape=(3,), low=-1.0, high=1.0, dtype=np.float32) + elif self.action_filter == "mobile_manipulation": + self.action_space = gym.spaces.Box(shape=(17,), low=-1.0, high=1.0, dtype=np.float32) + elif self.action_filter == "tabletop_manipulation": + self.action_space = gym.spaces.Box(shape=(7,), low=-1.0, high=1.0, dtype=np.float32) + else: + self.action_space = gym.spaces.Box(shape=(26,), low=-1.0, high=1.0, dtype=np.float32) + elif isinstance(self.robots[0], FetchGripper): + if self.action_filter == "navigation": + self.action_space = gym.spaces.Box(shape=(2,), low=-1.0, high=1.0, dtype=np.float32) + elif self.action_filter == "tabletop_manipulation": + self.action_space = gym.spaces.Box(shape=(7,), low=-1.0, high=1.0, dtype=np.float32) + else: + self.action_space = gym.spaces.Box(shape=(11,), low=-1.0, high=1.0, dtype=np.float32) + else: + Exception("Only BehaviorRobot and FetchGripper are supported for behavior_env") + + def load_behavior_task_setup(self): + """ + Load task setup + """ + # task + task = self.config["task"] + task_id = self.config["task_id"] + scene_id = self.config["scene_id"] + clutter = self.config["clutter"] + online_sampling = self.config["online_sampling"] + # if online_sampling: + # scene_kwargs = {} + # else: + scene_kwargs = { + "urdf_file": "{}_task_{}_{}_{}_fixed_furniture".format(scene_id, task, task_id, self.instance_id), + } + bddl.set_backend("iGibson") + robot_class = self.config.get("robot") + if robot_class == "BehaviorRobot": + robot_type = BehaviorRobot + elif robot_class == "FetchGripper": + robot_type = FetchGripper + else: + Exception("Only BehaviorRobot and FetchGripper are supported for behavior_env") + + self.task = iGBEHAVIORActivityInstance(task, task_id, robot_type=robot_type, robot_config=self.config) + self.task.initialize_simulator( + simulator=self.simulator, + scene_id=scene_id, + load_clutter=clutter, + scene_kwargs=scene_kwargs, + online_sampling=online_sampling, + ) + + for _, obj in self.task.object_scope.items(): + if obj.category in ["agent", "room_floor"]: + continue + obj.highlight() + + self.scene = self.task.scene + self.robots = self.simulator.robots + + self.reset_checkpoint_idx = self.config.get("reset_checkpoint_idx", -1) + self.reset_checkpoint_dir = self.config.get("reset_checkpoint_dir", None) + + def load_task_setup(self): + self.initial_pos_z_offset = self.config.get("initial_pos_z_offset", 0.1) + # s = 0.5 * G * (t ** 2) + drop_distance = 0.5 * 9.8 * (self.action_timestep ** 2) + assert drop_distance < self.initial_pos_z_offset, "initial_pos_z_offset is too small for collision checking" + + # ignore the agent's collision with these body ids + 
self.collision_ignore_body_b_ids = set(self.config.get("collision_ignore_body_b_ids", [])) + # ignore the agent's collision with these link ids of itself + self.collision_ignore_link_a_ids = set(self.config.get("collision_ignore_link_a_ids", [])) + + # discount factor + self.discount_factor = self.config.get("discount_factor", 0.99) + + # domain randomization frequency + self.texture_randomization_freq = self.config.get("texture_randomization_freq", None) + self.object_randomization_freq = self.config.get("object_randomization_freq", None) + + self.load_behavior_task_setup() + + # Activate the robot constraints so that we don't need to feed in + # trigger press action in the first couple frames + if isinstance(self.robots[0], BehaviorRobot): + self.robots[0].activate() + + def load(self): + """ + Load environment + """ + self.load_task_setup() + self.load_observation_space() + self.load_action_space() + self.load_miscellaneous_variables() + + def load_observation_space(self): + super(BehaviorEnv, self).load_observation_space() + if "proprioception" in self.output: + proprioception_dim = self.robots[0].get_proprioception_dim() + self.observation_space.spaces["proprioception"] = gym.spaces.Box( + low=-np.inf, high=np.inf, shape=(proprioception_dim,) + ) + self.observation_space = gym.spaces.Dict(self.observation_space.spaces) + + def step(self, action, save_video=False, task_name=""): + """ + Apply robot's action. + Returns the next state, reward, done and info, + following OpenAI Gym's convention + + :param action: robot actions + :return: state: next observation + :return: reward: reward of this time step + :return: done: whether the episode is terminated + :return: info: info dictionary with any useful information + """ + self.current_step += 1 + + if isinstance(self.robots[0], BehaviorRobot): + new_action = np.zeros((28,)) + if self.action_filter == "navigation": + new_action[:2] = action[:2] + new_action[5] = action[2] + elif self.action_filter == "mobile_manipulation": + # body x,y,yaw + new_action[:2] = action[:2] + new_action[5] = action[2] + # left hand 7d + new_action[12:19] = action[3:10] + # right hand 7d + new_action[20:27] = action[10:17] + elif self.action_filter == "tabletop_manipulation": + # only using right hand + new_action[20:27] = action[:7] + else: + # all action dims except hand reset + new_action[:19] = action[:19] + new_action[20:27] = action[19:] + # The original action space for BehaviorRobot is too wide for random exploration + # new_action *= 0.05 + elif isinstance(self.robots[0], FetchGripper): + new_action = np.zeros((11,)) + if self.action_filter == "navigation": + new_action[:2] = action[:2] + elif self.action_filter == "tabletop_manipulation": + new_action[4:] = action[:7] + else: + new_action = action + else: + Exception("Only BehaviorRobot and FetchGripper are supported for behavior_env") + + self.robots[0].apply_action(new_action) + if self.log_writer is not None: + self.log_writer.process_frame() + self.simulator.step(save_video=save_video, task_name=task_name) + + state = self.get_state() + info = {} + done, satisfied_predicates = self.task.check_success() + # Compute the initial reward potential here instead of during reset + # because if an intermediate checkpoint is loaded, we need step the + # simulator before calling task.check_success + if self.current_step == 1: + self.reward_potential = self.get_potential(satisfied_predicates) + + if self.current_step >= self.config["max_step"]: + done = True + reward, info = 
self.get_reward(satisfied_predicates) + + self.populate_info(info) + + if done and self.automatic_reset: + info["last_observation"] = state + state = self.reset() + + return state, reward, done, info + + def get_potential(self, satisfied_predicates): + potential = 0.0 + + # Evaluate the first ground goal state option as the potential + _, satisfied_predicates = evaluate_state(self.task.ground_goal_state_options[0]) + success_score = len(satisfied_predicates["satisfied"]) / ( + len(satisfied_predicates["satisfied"]) + len(satisfied_predicates["unsatisfied"]) + ) + predicate_potential = success_score + potential += predicate_potential + + return potential + + def get_reward(self, satisfied_predicates): + new_potential = self.get_potential(satisfied_predicates) + reward = new_potential - self.reward_potential + self.reward_potential = new_potential + return reward, {"satisfied_predicates": satisfied_predicates} + + def get_state(self, collision_links=[]): + """ + Get the current observation + + :param collision_links: collisions from last physics timestep + :return: observation as a dictionary + """ + state = OrderedDict() + if "task_obs" in self.output: + state["task_obs"] = self.task.get_task_obs(self) + if "vision" in self.sensors: + vision_obs = self.sensors["vision"].get_obs(self) + for modality in vision_obs: + state[modality] = vision_obs[modality] + if "scan_occ" in self.sensors: + scan_obs = self.sensors["scan_occ"].get_obs(self) + for modality in scan_obs: + state[modality] = scan_obs[modality] + if "bump" in self.sensors: + state["bump"] = self.sensors["bump"].get_obs(self) + + if "proprioception" in self.output: + state["proprioception"] = np.array(self.robots[0].get_proprioception()) + + return state + + def reset_scene_and_agent(self): + if self.reset_checkpoint_dir is not None and self.reset_checkpoint_idx != -1: + load_checkpoint(self.simulator, self.reset_checkpoint_dir, self.reset_checkpoint_idx) + else: + self.task.reset_scene(snapshot_id=self.task.initial_state) + # set the constraints to the current poses + self.robots[0].apply_action(np.zeros(self.robots[0].action_dim)) + + def reset(self): + """ + Reset episode + """ + # if self.log_writer is not None, save previous episode + if self.log_writer is not None: + self.log_writer.end_log_session() + del self.log_writer + self.log_writer = None + + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + task = self.config["task"] + task_id = self.config["task_id"] + scene = self.config["scene_id"] + if self.episode_save_dir: + vr_log_path = os.path.join( + self.episode_save_dir, + "{}_{}_{}_{}_{}.hdf5".format(task, task_id, scene, timestamp, self.current_episode), + ) + self.log_writer = IGLogWriter( + self.simulator, + frames_before_write=200, + log_filepath=vr_log_path, + task=self.task, + store_vr=False, + vr_robot=self.robots[0], + filter_objects=True, + ) + self.log_writer.set_up_data_storage() + + self.reset_scene_and_agent() + + self.simulator.sync(force_sync=True) + state = self.get_state() + self.reset_variables() + + return state + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + "-c", + default="igibson/examples/configs/behavior_onboard_sensing_fetch.yaml", + help="which config file to use [default: use yaml files in examples/configs]", + ) + parser.add_argument( + "--mode", + "-m", + choices=["headless", "gui", "iggui", "pbgui"], + default="iggui", + help="which mode for simulation (default: headless)", + ) + parser.add_argument( + 
"--action_filter", + "-af", + choices=["navigation", "tabletop_manipulation", "mobile_manipulation", "all"], + default="mobile_manipulation", + help="which action filter", + ) + args = parser.parse_args() + + env = BehaviorEnv( + config_file=args.config, + mode=args.mode, + action_timestep=1.0 / 30.0, + physics_timestep=1.0 / 300.0, + action_filter=args.action_filter, + episode_save_dir=None, + ) + step_time_list = [] + for episode in range(100): + print("Episode: {}".format(episode)) + start = time.time() + env.reset() + for i in range(1000): # 10 seconds + action = env.action_space.sample() + state, reward, done, _ = env.step(action) + if done: + break + print("Episode finished after {} timesteps, took {} seconds.".format(env.current_step, time.time() - start)) + env.close() diff --git a/igibson/envs/behavior_mp_env.py b/igibson/envs/behavior_mp_env.py index 883e9f62f..f046b2c57 100644 --- a/igibson/envs/behavior_mp_env.py +++ b/igibson/envs/behavior_mp_env.py @@ -1,4 +1,5 @@ import argparse +import logging import time from enum import IntEnum @@ -14,19 +15,17 @@ from igibson.objects.articulated_object import URDFObject from igibson.robots.behavior_robot import BRBody, BREye, BRHand from igibson.utils.behavior_robot_planning_utils import dry_run_base_plan, plan_base_motion_br, plan_hand_motion_br -from igibson.utils.utils import restoreState + +NUM_ACTIONS = 6 class ActionPrimitives(IntEnum): NAVIGATE_TO = 0 - LEFT_GRASP = 1 - RIGHT_GRASP = 2 - LEFT_PLACE_ONTOP = 3 - RIGHT_PLACE_ONTOP = 4 - LEFT_PLACE_INSIDE = 5 - RIGHT_PLACE_INSIDE = 6 - OPEN = 7 - CLOSE = 8 + GRASP = 1 + PLACE_ONTOP = 2 + PLACE_INSIDE = 3 + OPEN = 4 + CLOSE = 5 def get_aabb_volume(lo, hi): @@ -47,12 +46,11 @@ def detect_collision(bodyA, object_in_hand=None): def detect_robot_collision(robot): - left_object_in_hand = robot.links["left_hand"].object_in_hand - right_object_in_hand = robot.links["right_hand"].object_in_hand + object_in_hand = robot.parts["right_hand"].object_in_hand return ( - detect_collision(robot.links["body"].get_body_id()) - or detect_collision(robot.links["left_hand"].get_body_id(), left_object_in_hand) - or detect_collision(robot.links["right_hand"].get_body_id(), right_object_in_hand) + detect_collision(robot.parts["body"].body_id) + or detect_collision(robot.parts["left_hand"].body_id) + or detect_collision(robot.parts["right_hand"].body_id, object_in_hand) ) @@ -69,24 +67,21 @@ def __init__( action_timestep=1 / 10.0, physics_timestep=1 / 240.0, device_idx=0, + render_to_tensor=False, automatic_reset=False, seed=0, action_filter="mobile_manipulation", use_motion_planning=False, - activity_relevant_objects_only=True, ): """ - @param config_file: config_file path - @param scene_id: override scene_id in config file - :param mode: headless, headless_tensor, gui_interactive, gui_non_interactive - @param action_timestep: environment executes action per action_timestep second - @param physics_timestep: physics timestep for pybullet - @param device_idx: which GPU to run the simulation and rendering on - @param automatic_reset: whether to automatic reset after an episode finishes - @param seed: RNG seed for sampling - @param action_filter: see BehaviorEnv - @param use_motion_planning: Whether motion-planned primitives or magic primitives should be used - @param activity_relevant_objects_only: Whether the actions should be parameterized by AROs or all scene objs. 
+ :param config_file: config_file path + :param scene_id: override scene_id in config file + :param mode: headless, gui, iggui + :param action_timestep: environment executes action per action_timestep second + :param physics_timestep: physics timestep for pybullet + :param device_idx: which GPU to run the simulation and rendering on + :param render_to_tensor: whether to render directly to pytorch tensors + :param automatic_reset: whether to automatic reset after an episode finishes """ super(BehaviorMPEnv, self).__init__( config_file=config_file, @@ -95,29 +90,24 @@ def __init__( action_timestep=action_timestep, physics_timestep=physics_timestep, device_idx=device_idx, + render_to_tensor=render_to_tensor, action_filter=action_filter, seed=seed, automatic_reset=automatic_reset, ) + self.obj_in_hand = None self.use_motion_planning = use_motion_planning - self.activity_relevant_objects_only = activity_relevant_objects_only self.robots[0].initial_z_offset = 0.7 def load_action_space(self): - if self.activity_relevant_objects_only: - self.addressable_objects = [ - item - for item in self.task.object_scope.values() - if isinstance(item, URDFObject) or isinstance(item, RoomFloor) - ] - else: - self.addressable_objects = list( - set(self.task.simulator.scene.objects_by_name.values()) | set(self.task.object_scope.values()) - ) - - self.num_objects = len(self.addressable_objects) - self.action_space = gym.spaces.Discrete(self.num_objects * len(ActionPrimitives)) + self.task_relevant_objects = [ + item + for item in self.task.object_scope.values() + if isinstance(item, URDFObject) or isinstance(item, RoomFloor) + ] + self.num_objects = len(self.task_relevant_objects) + self.action_space = gym.spaces.Discrete(self.num_objects * NUM_ACTIONS) def get_body_ids(self, include_self=False): ids = [] @@ -126,158 +116,356 @@ def get_body_ids(self, include_self=False): ids.extend(object.body_ids) if include_self: - ids.append(self.robots[0].links["left_hand"].get_body_id()) - ids.append(self.robots[0].links["body"].get_body_id()) + ids.append(self.robots[0].parts["left_hand"].get_body_id()) + ids.append(self.robots[0].parts["body"].get_body_id()) return ids - def reset_and_release_hand(self, hand): + def reset_and_release_hand(self): self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation()) for _ in range(100): - self.robots[0].links[hand].set_close_fraction(0) - self.robots[0].links[hand].trigger_fraction = 0 + self.robots[0].parts["right_hand"].set_close_fraction(0) + self.robots[0].parts["right_hand"].trigger_fraction = 0 p.stepSimulation() def step(self, action): obj_list_id = int(action) % self.num_objects action_primitive = int(action) // self.num_objects - obj = self.addressable_objects[obj_list_id] + obj = self.task_relevant_objects[obj_list_id] if not (isinstance(obj, BRBody) or isinstance(obj, BRHand) or isinstance(obj, BREye)): if action_primitive == ActionPrimitives.NAVIGATE_TO: - if self.navigate_to_obj(obj): - print("PRIMITIVE: navigate to {} success".format(obj.name)) + if self.navigate_to_obj(obj, use_motion_planning=self.use_motion_planning): + logging.debug("PRIMITIVE: navigate to {} success".format(obj.name)) else: - print("PRIMITIVE: navigate to {} fail".format(obj.name)) + logging.debug("PRIMITIVE: navigate to {} fail".format(obj.name)) - elif action_primitive == ActionPrimitives.RIGHT_GRASP or action_primitive == ActionPrimitives.LEFT_GRASP: - hand = "right_hand" if action_primitive == ActionPrimitives.RIGHT_GRASP else "left_hand" - 
obj_in_hand_id = self.robots[0].links[hand].object_in_hand - obj_in_hand = self.scene.objects_by_id[obj_in_hand_id] if obj_in_hand_id is not None else None - if obj_in_hand is None: + elif action_primitive == ActionPrimitives.GRASP: + if self.obj_in_hand is None: if isinstance(obj, URDFObject) and hasattr(obj, "states") and object_states.AABB in obj.states: lo, hi = obj.states[object_states.AABB].get_value() volume = get_aabb_volume(lo, hi) - if volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed: # we can only grasp small objects - self.navigate_if_needed(obj) - self.grasp_obj(obj, hand) - obj_in_hand_id = self.robots[0].links[hand].object_in_hand - obj_in_hand = ( - self.scene.objects_by_id[obj_in_hand_id] if obj_in_hand_id is not None else None - ) - print("PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, obj_in_hand)) + if ( + volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed + ): # say we can only grasp small objects + if ( + np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) + < 2 + ): + self.grasp_obj(obj, use_motion_planning=self.use_motion_planning) + logging.debug( + "PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, self.obj_in_hand) + ) + else: + logging.debug("PRIMITIVE: grasp {} fail, too far".format(obj.name)) else: - print("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name)) - elif ( - action_primitive == ActionPrimitives.LEFT_PLACE_ONTOP - or action_primitive == ActionPrimitives.RIGHT_PLACE_ONTOP - ): - hand = "right_hand" if action_primitive == ActionPrimitives.RIGHT_PLACE_ONTOP else "left_hand" - obj_in_hand_id = self.robots[0].links[hand].object_in_hand - obj_in_hand = self.scene.objects_by_id[obj_in_hand_id] if obj_in_hand_id is not None else None - if obj_in_hand is not None and obj_in_hand != obj: - print("PRIMITIVE:attempt to place {} ontop {}".format(obj_in_hand.name, obj.name)) + logging.debug("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name)) + elif action_primitive == ActionPrimitives.PLACE_ONTOP: + if self.obj_in_hand is not None and self.obj_in_hand != obj: + logging.debug("PRIMITIVE:attempt to place {} ontop {}".format(self.obj_in_hand.name, obj.name)) if isinstance(obj, URDFObject): - self.navigate_if_needed(obj) + if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + state = p.saveState() + result = sample_kinematics( + "onTop", + self.obj_in_hand, + obj, + True, + use_ray_casting_method=True, + max_trials=20, + skip_falling=True, + ) + if result: + logging.debug( + "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name) + ) + pos = self.obj_in_hand.get_position() + orn = self.obj_in_hand.get_orientation() + self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + else: + p.removeState(state) + logging.debug( + "PRIMITIVE: place {} ontop {} fail, sampling fail".format( + self.obj_in_hand.name, obj.name + ) + ) + else: + logging.debug( + "PRIMITIVE: place {} ontop {} fail, too far".format(self.obj_in_hand.name, obj.name) + ) + else: state = p.saveState() result = sample_kinematics( - "onTop", - obj_in_hand, + "onFloor", + self.obj_in_hand, obj, True, use_ray_casting_method=True, max_trials=20, + skip_falling=True, ) - if result: - pos = obj_in_hand.get_position() - orn = obj_in_hand.get_orientation() - self.place_obj(state, pos, orn, hand) - print("PRIMITIVE: place {} ontop {} success".format(obj_in_hand.name, obj.name)) + logging.debug( + "PRIMITIVE: place {} ontop {} 
success".format(self.obj_in_hand.name, obj.name) + ) + pos = self.obj_in_hand.get_position() + orn = self.obj_in_hand.get_orientation() + self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) else: + logging.debug( + "PRIMITIVE: place {} ontop {} fail, sampling fail".format( + self.obj_in_hand.name, obj.name + ) + ) p.removeState(state) - print("PRIMITIVE: place {} ontop {} fail, sampling fail".format(obj_in_hand.name, obj.name)) + + elif action_primitive == ActionPrimitives.PLACE_INSIDE: + if self.obj_in_hand is not None and self.obj_in_hand != obj and isinstance(obj, URDFObject): + logging.debug("PRIMITIVE:attempt to place {} inside {}".format(self.obj_in_hand.name, obj.name)) + if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + if ( + hasattr(obj, "states") + and object_states.Open in obj.states + and obj.states[object_states.Open].get_value() + ) or (hasattr(obj, "states") and not object_states.Open in obj.states): + state = p.saveState() + result = sample_kinematics( + "inside", + self.obj_in_hand, + obj, + True, + use_ray_casting_method=True, + max_trials=20, + skip_falling=True, + ) + if result: + logging.debug( + "PRIMITIVE: place {} inside {} success".format(self.obj_in_hand.name, obj.name) + ) + pos = self.obj_in_hand.get_position() + orn = self.obj_in_hand.get_orientation() + self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + else: + logging.debug( + "PRIMITIVE: place {} inside {} fail, sampling fail".format( + self.obj_in_hand.name, obj.name + ) + ) + p.removeState(state) + else: + logging.debug( + "PRIMITIVE: place {} inside {} fail, need open not open".format( + self.obj_in_hand.name, obj.name + ) + ) else: - state = p.saveState() - result = sample_kinematics( - "onFloor", obj_in_hand, obj, True, use_ray_casting_method=True, max_trials=20 + logging.debug( + "PRIMITIVE: place {} inside {} fail, too far".format(self.obj_in_hand.name, obj.name) ) - if result: - print("PRIMITIVE: place {} ontop {} success".format(obj_in_hand.name, obj.name)) - pos = obj_in_hand.get_position() - orn = obj_in_hand.get_orientation() - self.place_obj(state, pos, orn, hand) + elif action_primitive == ActionPrimitives.OPEN: + if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + if hasattr(obj, "states") and object_states.Open in obj.states: + obj.states[object_states.Open].set_value(True) + else: + logging.debug("PRIMITIVE open failed, cannot be opened") + else: + logging.debug("PRIMITIVE open failed, too far") + + elif action_primitive == ActionPrimitives.CLOSE: + if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + if hasattr(obj, "states") and object_states.Open in obj.states: + obj.states[object_states.Open].set_value(False) + else: + logging.debug("PRIMITIVE close failed, cannot be opened") + else: + logging.debug("PRIMITIVE close failed, too far") + + state, reward, done, info = super(BehaviorMPEnv, self).step(np.zeros(17)) + logging.debug("PRIMITIVE satisfied predicates:", info["satisfied_predicates"]) + return state, reward, done, info + + def step_with_exec_info(self, action): + """ + Same as the above step method, but returns an additional param (action_exec_status) + that is True if the high-level action execution succeeded and False otherwise + """ + obj_list_id = int(action) % self.num_objects + action_primitive = int(action) // self.num_objects + action_exec_status = False + + obj = 
self.task_relevant_objects[obj_list_id] + if not (isinstance(obj, BRBody) or isinstance(obj, BRHand) or isinstance(obj, BREye)): + if action_primitive == ActionPrimitives.NAVIGATE_TO: + if self.navigate_to_obj(obj, use_motion_planning=self.use_motion_planning): + print("PRIMITIVE: navigate to {} success".format(obj.name)) + action_exec_status = True + else: + print("PRIMITIVE: navigate to {} fail".format(obj.name)) + + elif action_primitive == ActionPrimitives.GRASP: + if self.obj_in_hand is None: + if isinstance(obj, URDFObject) and hasattr(obj, "states") and object_states.AABB in obj.states: + lo, hi = obj.states[object_states.AABB].get_value() + volume = get_aabb_volume(lo, hi) + if ( + volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed + ): # say we can only grasp small objects + if ( + np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) + < 2 + ): + self.grasp_obj(obj, use_motion_planning=self.use_motion_planning) + print( + "PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, self.obj_in_hand) + ) + action_exec_status = True + else: + print("PRIMITIVE: grasp {} fail, too far".format(obj.name)) else: - print("PRIMITIVE: place {} ontop {} fail, sampling fail".format(obj_in_hand.name, obj.name)) - p.removeState(state) + print("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name)) + elif action_primitive == ActionPrimitives.PLACE_ONTOP: + if self.obj_in_hand is not None and self.obj_in_hand != obj: + print("PRIMITIVE:attempt to place {} ontop {}".format(self.obj_in_hand.name, obj.name)) - elif ( - action_primitive == ActionPrimitives.LEFT_PLACE_INSIDE - or action_primitive == ActionPrimitives.RIGHT_PLACE_INSIDE - ): - hand = "right_hand" if action_primitive == ActionPrimitives.RIGHT_PLACE_INSIDE else "left_hand" - obj_in_hand_id = self.robots[0].links[hand].object_in_hand - obj_in_hand = self.scene.objects_by_id[obj_in_hand_id] if obj_in_hand_id is not None else None - if obj_in_hand is not None and obj_in_hand != obj and isinstance(obj, URDFObject): - print("PRIMITIVE:attempt to place {} inside {}".format(obj_in_hand.name, obj.name)) - if ( - hasattr(obj, "states") - and object_states.Open in obj.states - and obj.states[object_states.Open].get_value() - ) or (hasattr(obj, "states") and not object_states.Open in obj.states): - self.navigate_if_needed(obj) + if isinstance(obj, URDFObject): + if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + state = p.saveState() + result = sample_kinematics( + "onTop", + self.obj_in_hand, + obj, + True, + use_ray_casting_method=True, + max_trials=20, + skip_falling=True, + ) + if result: + print( + "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name) + ) + action_exec_status = True + pos = self.obj_in_hand.get_position() + orn = self.obj_in_hand.get_orientation() + self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + else: + p.removeState(state) + print( + "PRIMITIVE: place {} ontop {} fail, sampling fail".format( + self.obj_in_hand.name, obj.name + ) + ) + else: + print( + "PRIMITIVE: place {} ontop {} fail, too far".format(self.obj_in_hand.name, obj.name) + ) + else: state = p.saveState() result = sample_kinematics( - "inside", - obj_in_hand, + "onFloor", + self.obj_in_hand, obj, True, use_ray_casting_method=True, max_trials=20, + skip_falling=True, ) - if result: - pos = obj_in_hand.get_position() - orn = obj_in_hand.get_orientation() - self.place_obj(state, pos, orn, hand) - print("PRIMITIVE: 
place {} inside {} success".format(obj_in_hand.name, obj.name)) + print( + "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name) + ) + action_exec_status = True + pos = self.obj_in_hand.get_position() + orn = self.obj_in_hand.get_orientation() + self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) else: print( - "PRIMITIVE: place {} inside {} fail, sampling fail".format(obj_in_hand.name, obj.name) + "PRIMITIVE: place {} ontop {} fail, sampling fail".format( + self.obj_in_hand.name, obj.name + ) ) p.removeState(state) + + elif action_primitive == ActionPrimitives.PLACE_INSIDE: + if self.obj_in_hand is not None and self.obj_in_hand != obj and isinstance(obj, URDFObject): + print("PRIMITIVE:attempt to place {} inside {}".format(self.obj_in_hand.name, obj.name)) + if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + if ( + hasattr(obj, "states") + and object_states.Open in obj.states + and obj.states[object_states.Open].get_value() + ) or (hasattr(obj, "states") and not object_states.Open in obj.states): + state = p.saveState() + result = sample_kinematics( + "inside", + self.obj_in_hand, + obj, + True, + use_ray_casting_method=True, + max_trials=20, + skip_falling=True, + ) + if result: + print( + "PRIMITIVE: place {} inside {} success".format(self.obj_in_hand.name, obj.name) + ) + action_exec_status = True + pos = self.obj_in_hand.get_position() + orn = self.obj_in_hand.get_orientation() + self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + else: + print( + "PRIMITIVE: place {} inside {} fail, sampling fail".format( + self.obj_in_hand.name, obj.name + ) + ) + p.removeState(state) + else: + print( + "PRIMITIVE: place {} inside {} fail, need open not open".format( + self.obj_in_hand.name, obj.name + ) + ) else: print( - "PRIMITIVE: place {} inside {} fail, need open not open".format(obj_in_hand.name, obj.name) + "PRIMITIVE: place {} inside {} fail, too far".format(self.obj_in_hand.name, obj.name) ) elif action_primitive == ActionPrimitives.OPEN: - self.navigate_if_needed(obj) - - if hasattr(obj, "states") and object_states.Open in obj.states: - obj.states[object_states.Open].set_value(True, fully=True) + if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + if hasattr(obj, "states") and object_states.Open in obj.states: + action_exec_status = True + print("PRIMITIVE open succeeded") + obj.states[object_states.Open].set_value(True) + else: + print("PRIMITIVE open failed, cannot be opened") else: - print("PRIMITIVE open failed, cannot be opened") + print("PRIMITIVE open failed, too far") elif action_primitive == ActionPrimitives.CLOSE: - self.navigate_if_needed(obj) - - if hasattr(obj, "states") and object_states.Open in obj.states: - obj.states[object_states.Open].set_value(False) + if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + if hasattr(obj, "states") and object_states.Open in obj.states: + action_exec_status = True + obj.states[object_states.Open].set_value(False) + print("PRIMITIVE close succeeded") + else: + print("PRIMITIVE close failed, cannot be opened") else: - print("PRIMITIVE close failed, cannot be opened") + print("PRIMITIVE close failed, too far") + + else: + print(f"Attempted to execute a High Level Action whose target was the robot's body! 
This is incorrect, please pass a sensible high level action argument!") state, reward, done, info = super(BehaviorMPEnv, self).step(np.zeros(17)) print("PRIMITIVE satisfied predicates:", info["satisfied_predicates"]) - return state, reward, done, info + return state, reward, done, info, action_exec_status - def grasp_obj(self, obj, hand): - if self.use_motion_planning: + def grasp_obj(self, obj, use_motion_planning=False): + if use_motion_planning: x, y, _ = obj.get_position() z = obj.states[object_states.AABB].get_value()[1][2] - hand_x, hand_y, hand_z = self.robots[0].links[hand].get_position() + hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position() # add a little randomness to avoid getting stuck x += np.random.uniform(-0.025, 0.025) @@ -300,81 +488,84 @@ def grasp_obj(self, obj, hand): hand_limits=((minx, miny, minz), (maxx, maxy, maxz)), obstacles=self.get_body_ids(include_self=True), ) - restoreState(state) + p.restoreState(state) p.removeState(state) if plan is not None: - grasp_success = self.execute_grasp_plan(plan, obj, hand) - print("grasp success", grasp_success) - if not grasp_success: - print("grasp failed") - self.reset_and_release_hand(hand) + grasp_success = self.execute_grasp_plan(plan, obj) + logging.debug("grasp success", grasp_success) + if grasp_success: + self.obj_in_hand = obj + else: + logging.debug("grasp failed") + self.reset_and_release_hand() else: - print("plan is None") - self.reset_and_release_hand(hand) - # reset hand + logging.debug("plan is None") + self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation()) + self.reset_and_release_hand() else: - obj.set_position(np.array(self.robots[0].links[hand].get_position())) # - np.array([0,0,0.05]) - self.robots[0].links[hand].set_close_fraction(1) - self.robots[0].links[hand].trigger_fraction = 1 + self.obj_in_hand = obj + obj.set_position(np.array(self.robots[0].parts["right_hand"].get_position())) + self.robots[0].parts["right_hand"].set_close_fraction(1) + self.robots[0].parts["right_hand"].trigger_fraction = 1 p.stepSimulation() - obj.set_position(np.array(self.robots[0].links[hand].get_position())) - self.robots[0].links[hand].handle_assisted_grasping( + obj.set_position(np.array(self.robots[0].parts["right_hand"].get_position())) + self.robots[0].parts["right_hand"].handle_assisted_grasping( np.zeros( 28, ), - override_ag_data=(obj.get_body_id(), -1), + override_ag_data=(obj.body_id[0], -1), ) - def execute_grasp_plan(self, plan, obj, hand): + def execute_grasp_plan(self, plan, obj): for x, y, z, roll, pitch, yaw in plan: - self.robots[0].links[hand].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) + self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) p.stepSimulation() x, y, z, roll, pitch, yaw = plan[-1] + # lower the hand until it touches the object for i in range(25): - self.robots[0].links[hand].move([x, y, z - i * 0.005], p.getQuaternionFromEuler([roll, pitch, yaw])) + self.robots[0].parts["right_hand"].move([x, y, z - i * 0.005], p.getQuaternionFromEuler([roll, pitch, yaw])) p.stepSimulation() + # close the hand for _ in range(50): - self.robots[0].links[hand].set_close_fraction(1) - self.robots[0].links[hand].trigger_fraction = 1 + self.robots[0].parts["right_hand"].set_close_fraction(1) + self.robots[0].parts["right_hand"].trigger_fraction = 1 p.stepSimulation() grasp_success = ( self.robots[0] - .links[hand] + .parts["right_hand"] .handle_assisted_grasping( np.zeros( 28, ), 
- override_ag_data=(obj.get_body_id(), -1), + override_ag_data=(obj.body_id[0], -1), ) ) + # reverse the plan and get object close to torso for x, y, z, roll, pitch, yaw in plan[::-1]: - self.robots[0].links[hand].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) + self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) p.stepSimulation() return grasp_success - def place_obj(self, original_state, target_pos, target_orn, hand): - obj_in_hand_id = self.robots[0].links[hand].object_in_hand - obj_in_hand = self.scene.objects_by_id[obj_in_hand_id] - - pos = obj_in_hand.get_position() - restoreState(original_state) + def place_obj(self, original_state, target_pos, target_orn, use_motion_planning=False): + pos = self.obj_in_hand.get_position() + p.restoreState(original_state) p.removeState(original_state) - if not self.use_motion_planning: - self.reset_and_release_hand(hand) - - self.robots[0].links[hand].force_release_obj() - obj_in_hand.set_position_orientation(target_pos, target_orn) + if not use_motion_planning: + self.reset_and_release_hand() + self.robots[0].parts["right_hand"].force_release_obj() + self.obj_in_hand.set_position_orientation(target_pos, target_orn) + self.obj_in_hand = None else: x, y, z = target_pos - hand_x, hand_y, hand_z = self.robots[0].links[hand].get_position() + hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position() minx = min(x, hand_x) - 1 miny = min(y, hand_y) - 1 @@ -385,30 +576,34 @@ def place_obj(self, original_state, target_pos, target_orn, hand): state = p.saveState() obstacles = self.get_body_ids() - obstacles.remove(self.obj_in_hand.get_body_id()) + obstacles.remove(self.obj_in_hand.body_id[0]) plan = plan_hand_motion_br( robot=self.robots[0], - obj_in_hand=obj_in_hand, + obj_in_hand=self.obj_in_hand, end_conf=[x, y, z + 0.1, 0, np.pi * 5 / 6.0, 0], hand_limits=((minx, miny, minz), (maxx, maxy, maxz)), obstacles=obstacles, ) # - restoreState(state) + p.restoreState(state) p.removeState(state) if plan: for x, y, z, roll, pitch, yaw in plan: - self.robots[0].links[hand].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) + self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) p.stepSimulation() + released_obj = self.obj_in_hand + self.obj_in_hand = None - self.reset_and_release_hand(hand) + # release hand + self.reset_and_release_hand() # force release object to avoid dealing with stateful AG release mechanism - self.robots[0].links[hand].force_release_obj() + self.robots[0].parts["right_hand"].force_release_obj() self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation()) + # reset hand # reset the released object to zero velocity - p.resetBaseVelocity(obj_in_hand.get_body_id(), linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0]) + p.resetBaseVelocity(released_obj.get_body_id(), linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0]) # let object fall for _ in range(100): @@ -420,22 +615,27 @@ def sample_fn(self): theta = np.random.uniform(*CIRCULAR_LIMITS) return (x, y, theta) - def navigate_to_obj(self, obj): + def navigate_to_obj(self, obj, use_motion_planning=False): # test agent positions around an obj # try to place the agent near the object, and rotate it to the object valid_position = None # ((x,y,z),(roll, pitch, yaw)) original_position = self.robots[0].get_position() original_orientation = self.robots[0].get_orientation() + if isinstance(obj, URDFObject): distance_to_try = [0.6, 
1.2, 1.8, 2.4] obj_pos = obj.get_position() for distance in distance_to_try: - for _ in range(20): + for _ in range(40): yaw = np.random.uniform(-np.pi, np.pi) - pos = [obj_pos[0] + distance * np.cos(yaw), obj_pos[1] + distance * np.sin(yaw), 0.7] + pos = [ + obj_pos[0] + distance * np.cos(yaw), + obj_pos[1] + distance * np.sin(yaw), + self.robots[0].initial_z_offset, + ] orn = [0, 0, yaw - np.pi] self.robots[0].set_position_orientation(pos, p.getQuaternionFromEuler(orn)) - eye_pos = self.robots[0].links["eye"].get_position() + eye_pos = self.robots[0].parts["eye"].get_position() obj_pos = obj.get_position() ray_test_res = p.rayTest(eye_pos, obj_pos) blocked = False @@ -448,7 +648,7 @@ def navigate_to_obj(self, obj): if valid_position is not None: break else: - for _ in range(60): + for _ in range(120): _, pos = obj.scene.get_random_point_by_room_instance(obj.room_instance) yaw = np.random.uniform(-np.pi, np.pi) orn = [0, 0, yaw] @@ -458,21 +658,12 @@ def navigate_to_obj(self, obj): break if valid_position is not None: - target_x = valid_position[0][0] - target_y = valid_position[0][1] - x = original_position[0] - y = original_position[1] - minx = min(x, target_x) - 1 - miny = min(y, target_y) - 1 - maxx = max(x, target_x) + 1 - maxy = max(y, target_y) + 1 - - if self.use_motion_planning: + if use_motion_planning: self.robots[0].set_position_orientation(original_position, original_orientation) plan = plan_base_motion_br( robot=self.robots[0], end_conf=[valid_position[0][0], valid_position[0][1], valid_position[1][2]], - base_limits=[(minx, miny), (maxx, maxy)], + base_limits=(), obstacles=self.get_body_ids(), override_sample_fn=self.sample_fn, ) @@ -481,7 +672,6 @@ def navigate_to_obj(self, obj): if self.mode != "headless": dry_run_base_plan(robot=self.robots[0], plan=plan) else: - # TODO: Still execute the plan in headless mode instead of just teleporting. self.robots[0].set_position_orientation( valid_position[0], p.getQuaternionFromEuler(valid_position[1]) ) @@ -496,20 +686,12 @@ def navigate_to_obj(self, obj): self.robots[0].set_position_orientation(original_position, original_orientation) return False - def navigate_if_needed(self, obj): - if obj.states[object_states.InReachOfRobot].get_value(): - return - - for _ in range(10): - if self.navigate_to_obj(obj): - return - def reset(self, resample_objects=False): + # NOTE: The below line used to be super(BehaviorMPEnv, self, resample_objects=resample_objects).reset() + # However, this causes an error because the super call only expects 2 args. 
obs = super(BehaviorMPEnv, self).reset() - for hand in ["left_hand", "right_hand"]: - self.robots[0].links[hand].set_close_fraction(0) - self.robots[0].links[hand].trigger_fraction = 0 - self.robots[0].links[hand].force_release_obj() + self.obj_in_hand = None + return obs @@ -524,9 +706,9 @@ def reset(self, resample_objects=False): parser.add_argument( "--mode", "-m", - choices=["headless", "headless_tensor", "gui_interactive", "gui_non_interactive"], - default="gui_interactive", - help="which mode for simulation (default: gui_interactive)", + choices=["headless", "gui", "iggui", "pbgui"], + default="gui", + help="which mode for simulation (default: headless)", ) args = parser.parse_args() @@ -542,7 +724,6 @@ def reset(self, resample_objects=False): print("Episode: {}".format(episode)) start = time.time() env.reset() - for i in range(1000): # 10 seconds action = env.action_space.sample() state, reward, done, info = env.step(action) diff --git a/igibson/envs/behavior_tamp_env.py b/igibson/envs/behavior_tamp_env.py new file mode 100644 index 000000000..9230488d0 --- /dev/null +++ b/igibson/envs/behavior_tamp_env.py @@ -0,0 +1,851 @@ +import argparse +import logging +import time +from enum import IntEnum + +import gym.spaces +import numpy as np +import pybullet as p +import scipy + +from igibson import object_states +from igibson.envs.behavior_env import BehaviorEnv +from igibson.external.pybullet_tools.utils import CIRCULAR_LIMITS, get_base_difference_fn +from igibson.object_states.on_floor import RoomFloor +from igibson.object_states.utils import sample_kinematics, continuous_param_kinematics +from igibson.objects.articulated_object import URDFObject +from igibson.robots.behavior_robot import BRBody, BREye, BRHand +from igibson.utils.behavior_robot_planning_utils import dry_run_base_plan, plan_base_motion_br, plan_hand_motion_br +# from igibson.external.pybullet_tools.utils import ( +# CIRCULAR_LIMITS, +# MAX_DISTANCE, +# PI, +# birrt, +# circular_difference, +# direct_path, +# get_base_difference_fn, +# get_base_distance_fn, +# pairwise_collision, +# ) + +NUM_ACTIONS = 6 +MAX_ACTION_CONT_PARAMS = 7 + + +class ActionPrimitives(IntEnum): + NAVIGATE_TO = 0 + GRASP = 1 + PLACE_ONTOP = 2 + PLACE_INSIDE = 3 + OPEN = 4 + CLOSE = 5 + + +def get_aabb_volume(lo, hi): + dimension = hi - lo + return dimension[0] * dimension[1] * dimension[2] + + +def detect_collision(bodyA, object_in_hand=None): + collision = False + for body_id in range(p.getNumBodies()): + if body_id == bodyA or body_id == object_in_hand: + continue + closest_points = p.getClosestPoints(bodyA, body_id, distance=0.01) + if len(closest_points) > 0: + collision = True + break + return collision + + +def detect_robot_collision(robot): + object_in_hand = robot.parts["right_hand"].object_in_hand + return ( + detect_collision(robot.parts["body"].body_id) + or detect_collision(robot.parts["left_hand"].body_id) + or detect_collision(robot.parts["right_hand"].body_id, object_in_hand) + ) + + +class BehaviorTAMPEnv(BehaviorEnv): + """ + iGibson Environment (OpenAI Gym interface). + Similar to BehaviorMPEnv, except (1) all high-level + actions are parameterized to make them deterministic + and thus conducive to sample-based TAMP and (2) there are + more such high-level actions. 
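+ + Example (a hedged usage sketch; `env` and `obj_index` are hypothetical placeholders, and the encoding follows + step_with_exec_info below, where the discrete action is primitive * num_objects + object index and the continuous + parameters are packed into a length-MAX_ACTION_CONT_PARAMS vector): + + params = np.zeros(MAX_ACTION_CONT_PARAMS) + params[0:2] = [0.5, 0.0] # desired x/y offset from the target object for NAVIGATE_TO + action = int(ActionPrimitives.NAVIGATE_TO) * env.num_objects + obj_index + state, reward, done, info, ok = env.step_with_exec_info(action, params)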
+ """ + + def __init__( + self, + config_file, + scene_id=None, + mode="headless", + action_timestep=1 / 10.0, + physics_timestep=1 / 240.0, + device_idx=0, + render_to_tensor=False, + automatic_reset=False, + seed=0, + action_filter="mobile_manipulation", + use_motion_planning=False, + ): + """ + :param config_file: config_file path + :param scene_id: override scene_id in config file + :param mode: headless, gui, iggui + :param action_timestep: environment executes action per action_timestep second + :param physics_timestep: physics timestep for pybullet + :param device_idx: which GPU to run the simulation and rendering on + :param render_to_tensor: whether to render directly to pytorch tensors + :param automatic_reset: whether to automatic reset after an episode finishes + """ + super(BehaviorTAMPEnv, self).__init__( + config_file=config_file, + scene_id=scene_id, + mode=mode, + action_timestep=action_timestep, + physics_timestep=physics_timestep, + device_idx=device_idx, + render_to_tensor=render_to_tensor, + action_filter=action_filter, + seed=seed, + automatic_reset=automatic_reset, + ) + + self.obj_in_hand = None + self.use_motion_planning = use_motion_planning + self.robots[0].initial_z_offset = 0.7 + + def load_action_space(self): + self.task_relevant_objects = [ + item + for item in self.task.object_scope.values() + if isinstance(item, URDFObject) or isinstance(item, RoomFloor) + ] + self.num_objects = len(self.task_relevant_objects) + self.action_space = gym.spaces.Discrete(self.num_objects * NUM_ACTIONS) + + def get_body_ids(self, include_self=False): + ids = [] + for object in self.scene.get_objects(): + if isinstance(object, URDFObject): + ids.extend(object.body_ids) + + if include_self: + ids.append(self.robots[0].parts["left_hand"].get_body_id()) + ids.append(self.robots[0].parts["body"].get_body_id()) + + return ids + + def reset_and_release_hand(self): + self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation()) + for _ in range(100): + self.robots[0].parts["right_hand"].set_close_fraction(0) + self.robots[0].parts["right_hand"].trigger_fraction = 0 + p.stepSimulation() + + # def step(self, action, continuous_params): + # """ + # :param action: an integer such that action % self.num_objects yields the object id, and + # action // self.num_objects yields the correct action enum + # :param continuous_params: a numpy vector of length MAX_ACTION_CONT_PARAMS. This represents + # values derived from the continuous parameters of the action. 
+ # """ + + # obj_list_id = int(action) % self.num_objects + # action_primitive = int(action) // self.num_objects + # obj = self.task_relevant_objects[obj_list_id] + + # assert continuous_params.shape == (MAX_ACTION_CONT_PARAMS,) + + # if not (isinstance(obj, BRBody) or isinstance(obj, BRHand) or isinstance(obj, BREye)): + # if action_primitive == ActionPrimitives.NAVIGATE_TO: + # if self.navigate_to_obj_pos(obj, continuous_params[0:2], use_motion_planning=self.use_motion_planning): + # logging.debug("PRIMITIVE: navigate to {} success".format(obj.name)) + # else: + # logging.debug("PRIMITIVE: navigate to {} fail".format(obj.name)) + + # elif action_primitive == ActionPrimitives.GRASP: + # if self.obj_in_hand is None: + # if isinstance(obj, URDFObject) and hasattr(obj, "states") and object_states.AABB in obj.states: + # lo, hi = obj.states[object_states.AABB].get_value() + # volume = get_aabb_volume(lo, hi) + # if ( + # volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed + # ): # say we can only grasp small objects + # if ( + # np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) + # < 2 + # ): + # self.grasp_obj(obj, use_motion_planning=self.use_motion_planning) + # logging.debug( + # "PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, self.obj_in_hand) + # ) + # else: + # logging.debug("PRIMITIVE: grasp {} fail, too far".format(obj.name)) + # else: + # logging.debug("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name)) + + # elif action_primitive == ActionPrimitives.PLACE_ONTOP: + # if self.obj_in_hand is not None and self.obj_in_hand != obj: + # logging.debug("PRIMITIVE:attempt to place {} ontop {}".format(self.obj_in_hand.name, obj.name)) + + # if isinstance(obj, URDFObject): + # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + # state = p.saveState() + # result = sample_kinematics( + # "onTop", + # self.obj_in_hand, + # obj, + # True, + # use_ray_casting_method=True, + # max_trials=20, + # skip_falling=True, + # ) + # if result: + # logging.debug( + # "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name) + # ) + # pos = self.obj_in_hand.get_position() + # orn = self.obj_in_hand.get_orientation() + # self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + # else: + # p.removeState(state) + # logging.debug( + # "PRIMITIVE: place {} ontop {} fail, sampling fail".format( + # self.obj_in_hand.name, obj.name + # ) + # ) + + # else: + # logging.debug( + # "PRIMITIVE: place {} ontop {} fail, too far".format(self.obj_in_hand.name, obj.name) + # ) + # else: + # state = p.saveState() + # result = sample_kinematics( + # "onFloor", + # self.obj_in_hand, + # obj, + # True, + # use_ray_casting_method=True, + # max_trials=20, + # skip_falling=True, + # ) + # if result: + # logging.debug( + # "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name) + # ) + # pos = self.obj_in_hand.get_position() + # orn = self.obj_in_hand.get_orientation() + # self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + # else: + # logging.debug( + # "PRIMITIVE: place {} ontop {} fail, sampling fail".format( + # self.obj_in_hand.name, obj.name + # ) + # ) + # p.removeState(state) + + # elif action_primitive == ActionPrimitives.PLACE_INSIDE: + # if self.obj_in_hand is not None and self.obj_in_hand != obj and isinstance(obj, URDFObject): + # logging.debug("PRIMITIVE:attempt to place {} inside {}".format(self.obj_in_hand.name, 
obj.name)) + # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + # if ( + # hasattr(obj, "states") + # and object_states.Open in obj.states + # and obj.states[object_states.Open].get_value() + # ) or (hasattr(obj, "states") and not object_states.Open in obj.states): + # state = p.saveState() + # result = sample_kinematics( + # "inside", + # self.obj_in_hand, + # obj, + # True, + # use_ray_casting_method=True, + # max_trials=20, + # skip_falling=True, + # ) + # if result: + # logging.debug( + # "PRIMITIVE: place {} inside {} success".format(self.obj_in_hand.name, obj.name) + # ) + # pos = self.obj_in_hand.get_position() + # orn = self.obj_in_hand.get_orientation() + # self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + # else: + # logging.debug( + # "PRIMITIVE: place {} inside {} fail, sampling fail".format( + # self.obj_in_hand.name, obj.name + # ) + # ) + # p.removeState(state) + # else: + # logging.debug( + # "PRIMITIVE: place {} inside {} fail, need open not open".format( + # self.obj_in_hand.name, obj.name + # ) + # ) + # else: + # logging.debug( + # "PRIMITIVE: place {} inside {} fail, too far".format(self.obj_in_hand.name, obj.name) + # ) + # elif action_primitive == ActionPrimitives.OPEN: + # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + # if hasattr(obj, "states") and object_states.Open in obj.states: + # obj.states[object_states.Open].set_value(True) + # else: + # logging.debug("PRIMITIVE open failed, cannot be opened") + # else: + # logging.debug("PRIMITIVE open failed, too far") + + # elif action_primitive == ActionPrimitives.CLOSE: + # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + # if hasattr(obj, "states") and object_states.Open in obj.states: + # obj.states[object_states.Open].set_value(False) + # else: + # logging.debug("PRIMITIVE close failed, cannot be opened") + # else: + # logging.debug("PRIMITIVE close failed, too far") + + # state, reward, done, info = super(BehaviorTAMPEnv, self).step(np.zeros(17)) + # logging.debug("PRIMITIVE satisfied predicates:", info["satisfied_predicates"]) + # return state, reward, done, info + + def step_with_exec_info(self, action, continuous_params): + """ + Same as the above step method, but returns an additional param (action_exec_status) + that is True if the high-level action execution succeeded and False otherwise + """ + obj_list_id = int(action) % self.num_objects + action_primitive = int(action) // self.num_objects + action_exec_status = False + obj = self.task_relevant_objects[obj_list_id] + + assert continuous_params.shape == (MAX_ACTION_CONT_PARAMS,) + + if not (isinstance(obj, BRBody) or isinstance(obj, BRHand) or isinstance(obj, BREye)): + if action_primitive == ActionPrimitives.NAVIGATE_TO: + if self.navigate_to_obj_pos(obj, continuous_params[0:2], use_motion_planning=self.use_motion_planning): + print("PRIMITIVE: navigate to {} success".format(obj.name)) + action_exec_status = True + else: + print(f"PRIMITIVE: navigate to {obj.name} with params {continuous_params[0:2]} fail") + + elif action_primitive == ActionPrimitives.GRASP: + if self.obj_in_hand is None: + if isinstance(obj, URDFObject) and hasattr(obj, "states") and object_states.AABB in obj.states: + lo, hi = obj.states[object_states.AABB].get_value() + volume = get_aabb_volume(lo, hi) + if ( + volume < 0.2 * 0.2 * 0.2 and not obj.main_body_is_fixed + ): # say we can only grasp small objects + if 
( + np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) + < 2 + ): + self.grasp_obj_at_pos(obj, continuous_params[0:3], use_motion_planning=self.use_motion_planning) + print( + "PRIMITIVE: grasp {} success, obj in hand {}".format(obj.name, self.obj_in_hand) + ) + action_exec_status = True + else: + print("PRIMITIVE: grasp {} fail, too far".format(obj.name)) + else: + print("PRIMITIVE: grasp {} fail, too big or fixed".format(obj.name)) + else: + print("PRIMITIVE: grasp {} fail, agent already has an object in hand!".format(obj.name)) + + elif action_primitive == ActionPrimitives.PLACE_ONTOP: + if self.obj_in_hand is not None and self.obj_in_hand != obj: + print("PRIMITIVE:attempt to place {} ontop {}".format(self.obj_in_hand.name, obj.name)) + + if isinstance(obj, URDFObject): + if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + state = p.saveState() + result = continuous_param_kinematics( + "onTop", + self.obj_in_hand, + obj, + True, + continuous_params[0:7], + use_ray_casting_method=True, + max_trials=10, + skip_falling=True, + ) + if result: + print( + "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name) + ) + action_exec_status = True + pos = self.obj_in_hand.get_position() + orn = self.obj_in_hand.get_orientation() + self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + else: + p.removeState(state) + print( + "PRIMITIVE: place {} ontop {} fail, sampling fail".format( + self.obj_in_hand.name, obj.name + ) + ) + + else: + print( + "PRIMITIVE: place {} ontop {} fail, too far".format(self.obj_in_hand.name, obj.name) + ) + else: + state = p.saveState() + result = continuous_param_kinematics( + "onFloor", + self.obj_in_hand, + obj, + True, + continuous_params[0:7], + use_ray_casting_method=True, + max_trials=10, + skip_falling=True, + ) + if result: + print( + "PRIMITIVE: place {} ontop {} success".format(self.obj_in_hand.name, obj.name) + ) + action_exec_status = True + pos = self.obj_in_hand.get_position() + orn = self.obj_in_hand.get_orientation() + self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + else: + print( + "PRIMITIVE: place {} ontop {} fail, sampling fail".format( + self.obj_in_hand.name, obj.name + ) + ) + p.removeState(state) + + # elif action_primitive == ActionPrimitives.PLACE_INSIDE: + # if self.obj_in_hand is not None and self.obj_in_hand != obj and isinstance(obj, URDFObject): + # print("PRIMITIVE:attempt to place {} inside {}".format(self.obj_in_hand.name, obj.name)) + # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + # if ( + # hasattr(obj, "states") + # and object_states.Open in obj.states + # and obj.states[object_states.Open].get_value() + # ) or (hasattr(obj, "states") and not object_states.Open in obj.states): + # state = p.saveState() + # result = sample_kinematics( + # "inside", + # self.obj_in_hand, + # obj, + # True, + # use_ray_casting_method=True, + # max_trials=20, + # skip_falling=True, + # ) + # if result: + # print( + # "PRIMITIVE: place {} inside {} success".format(self.obj_in_hand.name, obj.name) + # ) + # action_exec_status = True + # pos = self.obj_in_hand.get_position() + # orn = self.obj_in_hand.get_orientation() + # self.place_obj(state, pos, orn, use_motion_planning=self.use_motion_planning) + # else: + # print( + # "PRIMITIVE: place {} inside {} fail, sampling fail".format( + # self.obj_in_hand.name, obj.name + # ) + # ) + # 
p.removeState(state) + # else: + # print( + # "PRIMITIVE: place {} inside {} fail, need open not open".format( + # self.obj_in_hand.name, obj.name + # ) + # ) + # else: + # print( + # "PRIMITIVE: place {} inside {} fail, too far".format(self.obj_in_hand.name, obj.name) + # ) + # elif action_primitive == ActionPrimitives.OPEN: + # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + # if hasattr(obj, "states") and object_states.Open in obj.states: + # action_exec_status = True + # print("PRIMITIVE open succeeded") + # obj.states[object_states.Open].set_value(True) + # else: + # print("PRIMITIVE open failed, cannot be opened") + # else: + # print("PRIMITIVE open failed, too far") + + # elif action_primitive == ActionPrimitives.CLOSE: + # if np.linalg.norm(np.array(obj.get_position()) - np.array(self.robots[0].get_position())) < 2: + # if hasattr(obj, "states") and object_states.Open in obj.states: + # action_exec_status = True + # obj.states[object_states.Open].set_value(False) + # print("PRIMITIVE close succeeded") + # else: + # print("PRIMITIVE close failed, cannot be opened") + # else: + # print("PRIMITIVE close failed, too far") + + else: + print(f"Attempted to execute a High Level Action whose target was the robot's body! This is incorrect, please pass a sensible high level action argument!") + + state, reward, done, info = super(BehaviorTAMPEnv, self).step(np.zeros(17)) + print("PRIMITIVE satisfied predicates:", info["satisfied_predicates"]) + return state, reward, done, info, action_exec_status + + def grasp_obj_at_pos(self, obj, grasp_offset_and_z_rot, use_motion_planning=False): + if use_motion_planning: + x, y, z = obj.get_position() + x += grasp_offset_and_z_rot[0] + y += grasp_offset_and_z_rot[1] + z += grasp_offset_and_z_rot[2] + hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position() + + # # add a little randomness to avoid getting stuck + # x += np.random.uniform(-0.025, 0.025) + # y += np.random.uniform(-0.025, 0.025) + # z += np.random.uniform(-0.025, 0.025) + + minx = min(x, hand_x) - 0.5 + miny = min(y, hand_y) - 0.5 + minz = min(z, hand_z) - 0.5 + maxx = max(x, hand_x) + 0.5 + maxy = max(y, hand_y) + 0.5 + maxz = max(z, hand_z) + 0.5 + + # compute the angle the hand must be in such that it can grasp the object from its current offset position + # This involves aligning the z-axis (in the world frame) of the hand with the vector that goes from the hand + # to the object. 
We can find the rotation matrix that accomplishes this rotation by following: + # https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d + hand_to_obj_vector = np.array([x - hand_x, y - hand_y, z - hand_z]) + hand_to_obj_unit_vector = hand_to_obj_vector / np.linalg.norm(hand_to_obj_vector) + unit_z_vector = np.array([0.0, 0.0, 1.0]) + c = np.dot(unit_z_vector, hand_to_obj_unit_vector) + if not c == -1.0: + v = np.cross(unit_z_vector, hand_to_obj_unit_vector) + s = np.linalg.norm(v) + v_x = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) + # Rodrigues-style construction: R = I + [v]_x + [v]_x^2 * (1 - c) / s^2 + R = np.eye(3) + v_x + np.linalg.matrix_power(v_x, 2) * ((1-c)/(s ** 2)) + r = scipy.spatial.transform.Rotation.from_matrix(R) + euler_angles = r.as_euler('xyz') + euler_angles[2] += grasp_offset_and_z_rot[3] + else: + euler_angles = np.array([0.0, np.pi, 0.0]) + + state = p.saveState() + # plan a motion to above the object + plan = plan_hand_motion_br( + robot=self.robots[0], + obj_in_hand=None, + end_conf=[x, y, z, euler_angles[0], euler_angles[1], euler_angles[2]], + hand_limits=((minx, miny, minz), (maxx, maxy, maxz)), + obstacles=self.get_body_ids(include_self=True), + ) + p.restoreState(state) + p.removeState(state) + + if plan is not None: + grasp_success = self.execute_grasp_plan(plan, obj) + logging.debug("grasp success: %s", grasp_success) + if grasp_success: + self.obj_in_hand = obj + else: + logging.debug("grasp failed") + self.reset_and_release_hand() + else: + logging.debug("plan is None") + self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation()) + self.reset_and_release_hand() + else: + self.obj_in_hand = obj + obj.set_position(np.array(self.robots[0].parts["right_hand"].get_position())) + self.robots[0].parts["right_hand"].set_close_fraction(1) + self.robots[0].parts["right_hand"].trigger_fraction = 1 + p.stepSimulation() + obj.set_position(np.array(self.robots[0].parts["right_hand"].get_position())) + self.robots[0].parts["right_hand"].handle_assisted_grasping( + np.zeros( + 28, + ), + override_ag_data=(obj.body_id[0], -1), + ) + + def execute_grasp_plan(self, plan, obj): + for x, y, z, roll, pitch, yaw in plan: + self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) + p.stepSimulation() + # s, a = get_last_action(p) + + x, y, z, roll, pitch, yaw = plan[-1] + + # lower the hand until it touches the object + for i in range(25): + self.robots[0].parts["right_hand"].move([x, y, z - i * 0.005], p.getQuaternionFromEuler([roll, pitch, yaw])) + p.stepSimulation() + + # close the hand + for _ in range(50): + self.robots[0].parts["right_hand"].set_close_fraction(1) + self.robots[0].parts["right_hand"].trigger_fraction = 1 + p.stepSimulation() + + grasp_success = ( + self.robots[0] + .parts["right_hand"] + .handle_assisted_grasping( + np.zeros( + 28, + ), + override_ag_data=(obj.body_id[0], -1), + ) + ) + + # reverse the plan and get object close to torso + for x, y, z, roll, pitch, yaw in plan[::-1]: + self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) + p.stepSimulation() + + return grasp_success + + def place_obj(self, original_state, target_pos, target_orn, use_motion_planning=False): + pos = self.obj_in_hand.get_position() + p.restoreState(original_state) + p.removeState(original_state) + if not use_motion_planning: + self.reset_and_release_hand() + self.robots[0].parts["right_hand"].force_release_obj() +
self.obj_in_hand.set_position_orientation(target_pos, target_orn) + self.obj_in_hand = None + + else: + x, y, z = target_pos + hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position() + + minx = min(x, hand_x) - 1 + miny = min(y, hand_y) - 1 + minz = min(z, hand_z) - 0.5 + maxx = max(x, hand_x) + 1 + maxy = max(y, hand_y) + 1 + maxz = max(z, hand_z) + 0.5 + + state = p.saveState() + obstacles = self.get_body_ids() + obstacles.remove(self.obj_in_hand.body_id[0]) + plan = plan_hand_motion_br( + robot=self.robots[0], + obj_in_hand=self.obj_in_hand, + end_conf=[x, y, z + 0.1, 0, np.pi * 5 / 6.0, 0], + hand_limits=((minx, miny, minz), (maxx, maxy, maxz)), + obstacles=obstacles, + ) # + p.restoreState(state) + p.removeState(state) + + if plan: + for x, y, z, roll, pitch, yaw in plan: + self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) + p.stepSimulation() + released_obj = self.obj_in_hand + self.obj_in_hand = None + + # release hand + self.reset_and_release_hand() + + # force release object to avoid dealing with stateful AG release mechanism + self.robots[0].parts["right_hand"].force_release_obj() + self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation()) + # reset hand + + # reset the released object to zero velocity + p.resetBaseVelocity(released_obj.get_body_id(), linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0]) + + # let object fall + for _ in range(100): + p.stepSimulation() + + def place_obj_plan(self, original_state, target_pos, target_orn, use_motion_planning=False): + pos = self.obj_in_hand.get_position() + p.restoreState(original_state) + p.removeState(original_state) + if not use_motion_planning: + self.reset_and_release_hand() + self.robots[0].parts["right_hand"].force_release_obj() + self.obj_in_hand.set_position_orientation(target_pos, target_orn) + self.obj_in_hand = None + + else: + x, y, z = target_pos + hand_x, hand_y, hand_z = self.robots[0].parts["right_hand"].get_position() + + minx = min(x, hand_x) - 1 + miny = min(y, hand_y) - 1 + minz = min(z, hand_z) - 0.5 + maxx = max(x, hand_x) + 1 + maxy = max(y, hand_y) + 1 + maxz = max(z, hand_z) + 0.5 + + state = p.saveState() + obstacles = self.get_body_ids() + obstacles.remove(self.obj_in_hand.body_id[0]) + plan = plan_hand_motion_br( + robot=self.robots[0], + obj_in_hand=self.obj_in_hand, + end_conf=[x, y, z + 0.1, 0, np.pi * 5 / 6.0, 0], + hand_limits=((minx, miny, minz), (maxx, maxy, maxz)), + obstacles=obstacles, + ) # + p.restoreState(state) + p.removeState(state) + + if plan: + for x, y, z, roll, pitch, yaw in plan: + self.robots[0].parts["right_hand"].move([x, y, z], p.getQuaternionFromEuler([roll, pitch, yaw])) + p.stepSimulation() + released_obj = self.obj_in_hand + self.obj_in_hand = None + + # release hand + self.reset_and_release_hand() + + # force release object to avoid dealing with stateful AG release mechanism + self.robots[0].parts["right_hand"].force_release_obj() + self.robots[0].set_position_orientation(self.robots[0].get_position(), self.robots[0].get_orientation()) + # reset hand + + # reset the released object to zero velocity + p.resetBaseVelocity(released_obj.get_body_id(), linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0]) + + # let object fall + for _ in range(100): + p.stepSimulation() + + return plan + + # def convert_nav_action_to_17dim_action(nav_action): + # """ + # :param nav_action: a 3-dim numpy array that represents the x-position, y-position and + # z-rotation of the robot in 
the world frame + # """ + # curr_body_pos = self.robots[0].get_position() + # curr_body_eul = p.getEulerFromQuaternion(self.robots[0].parts["right_hand"].get_orientation()) + # curr_right_pos = self.robots[0].parts["right_hand"].get_position() + # curr_right_eul = p.getEulerFromQuaternion(self.robots[0].parts["right_hand"].get_orientation()) + + # curr_body_pos_arr = np.zeros(3) + # curr_body_pos_arr[0:2] = np.array(curr_body_pos) + # new_body_pos_arr = np.zeros(3) + # curr_body_pos_arr[0:2] = np.array(nav_action[0:2]) + # new_body_pos_arr[2] = curr_body_pos_arr[2] + # right_pos_offset = curr_body_pos_arr - np.array(curr_right_pos) + # new_right_pos = new_body_pos_arr - right_pos_offset + + + + def sample_fn(self): + random_point = self.scene.get_random_point() + x, y = random_point[1][:2] + theta = np.random.uniform(*CIRCULAR_LIMITS) + return (x, y, theta) + + def navigate_to_obj_pos(self, obj, pos_offset, use_motion_planning=False): + """ + navigates the robot to a certain x,y position and selects an orientation + such that the robot is facing the object. If the navigation is infeasible, + returns an indication to this effect. + + :param obj: an object to navigate toward + :param pos_offset: a length-2 numpy array (x, y) offset from the object's position at which the robot should stand + :param use_motion_planning: accepted to match the callers; a base-motion plan is always computed when the commanded pose is valid + """ + + # test agent positions around an obj + # try to place the agent near the object, and rotate it to the object + valid_position = None # ((x,y,z),(roll, pitch, yaw)) + original_position = self.robots[0].get_position() + original_orientation = self.robots[0].get_orientation() + base_diff_fn = get_base_difference_fn() + + if isinstance(obj, URDFObject): # must be a URDFObject so we can get its position! + obj_pos = obj.get_position() + pos = [pos_offset[0] + obj_pos[0], pos_offset[1] + obj_pos[1], self.robots[0].initial_z_offset] + yaw_angle = np.arctan2(pos_offset[1], pos_offset[0]) + orn = [0, 0, yaw_angle] + self.robots[0].set_position_orientation(pos, p.getQuaternionFromEuler(orn)) + eye_pos = self.robots[0].parts["eye"].get_position() + ray_test_res = p.rayTest(eye_pos, obj_pos) + # Test to see if the robot is obstructed by some object, but make sure that object + # is not either the robot's body or the object we want to pick up!
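+ # (p.rayTest returns a list with a single hit record; entry [0][0] is the unique id of the body that was hit, or -1 if the ray hit nothing.)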
+ blocked = len(ray_test_res) > 0 and (ray_test_res[0][0] not in (self.robots[0].parts["body"].get_body_id(), obj.get_body_id())) + if not detect_robot_collision(self.robots[0]) and not blocked: + valid_position = (pos, orn) + + if valid_position is not None: + self.robots[0].set_position_orientation(original_position, original_orientation) + plan = plan_base_motion_br( + robot=self.robots[0], + end_conf=[valid_position[0][0], valid_position[0][1], valid_position[1][2]], + base_limits=(), + obstacles=self.get_body_ids(), + override_sample_fn=self.sample_fn, + ) + + plan_num_steps = len(plan) + ret_actions = np.zeros((17, plan_num_steps - 1)) + plan_arr = np.array(plan) + + for i in range(1, plan_num_steps): + # column i - 1 holds the (dx, dy, dtheta) delta between consecutive base waypoints + ret_actions[0:3, i - 1] = base_diff_fn(plan_arr[i, :], plan_arr[i - 1, :]) + + return ret_actions, True + + else: + print("Position commanded is in collision!") + self.robots[0].set_position_orientation(original_position, original_orientation) + return np.zeros(17), False + + + def reset(self, resample_objects=False): + obs = super(BehaviorTAMPEnv, self).reset() + self.obj_in_hand = None + + return obs + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + "-c", + default="igibson/examples/configs/behavior.yaml", + help="which config file to use [default: use yaml files in examples/configs]", + ) + parser.add_argument( + "--mode", + "-m", + choices=["headless", "gui", "iggui", "pbgui"], + default="gui", + help="which mode for simulation (default: gui)", + ) + args = parser.parse_args() + + env = BehaviorTAMPEnv( + config_file=args.config, + mode=args.mode, + action_timestep=1.0 / 300.0, + physics_timestep=1.0 / 300.0, + use_motion_planning=True, + ) + step_time_list = [] + for episode in range(100): + print("Episode: {}".format(episode)) + start = time.time() + env.reset() + for i in range(1000): # 10 seconds + action = env.action_space.sample() + state, reward, done, info = env.step(action) + print(reward, info) + if done: + break + print("Episode finished after {} timesteps, took {} seconds.".format(env.current_step, time.time() - start)) + env.close() diff --git a/igibson/envs/env_base.py b/igibson/envs/env_base.py index 03ad36092..8a89e41a7 100644 --- a/igibson/envs/env_base.py +++ b/igibson/envs/env_base.py @@ -1,21 +1,28 @@ import gym from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings -from igibson.robots import REGISTERED_ROBOTS +from igibson.robots.ant_robot import Ant +from igibson.robots.fetch_robot import Fetch +from igibson.robots.freight_robot import Freight +from igibson.robots.humanoid_robot import Humanoid +from igibson.robots.husky_robot import Husky +from igibson.robots.jr2_kinova_robot import JR2_Kinova +from igibson.robots.jr2_robot import JR2 +from igibson.robots.locobot_robot import Locobot +from igibson.robots.turtlebot_robot import Turtlebot from igibson.scenes.empty_scene import EmptyScene from igibson.scenes.gibson_indoor_scene import StaticIndoorScene from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene from igibson.scenes.stadium_scene import StadiumScene from igibson.simulator import Simulator -from igibson.simulator_vr import SimulatorVR from igibson.utils.utils import parse_config class BaseEnv(gym.Env): """ - Base Env class that handles loading scene and robot, following OpenAI Gym interface. - Functions like reset and step are not implemented.
+ Base Env class, follows OpenAI Gym interface + Handles loading scene and robot + Functions like reset and step are not implemented """ def __init__( @@ -25,21 +32,16 @@ def __init__( mode="headless", action_timestep=1 / 10.0, physics_timestep=1 / 240.0, - rendering_settings=None, - vr_settings=None, + render_to_tensor=False, device_idx=0, - use_pb_gui=False, ): """ :param config_file: config_file path :param scene_id: override scene_id in config file - :param mode: headless, headless_tensor, gui_interactive, gui_non_interactive, vr + :param mode: headless or gui mode :param action_timestep: environment executes action per action_timestep second :param physics_timestep: physics timestep for pybullet - :param rendering_settings: rendering_settings to override the default one - :param vr_settings: vr_settings to override the default one :param device_idx: device_idx: which GPU to run the simulation and rendering on - :param use_pb_gui: concurrently display the interactive pybullet gui (for debugging) """ self.config = parse_config(config_file) if scene_id is not None: @@ -48,60 +50,37 @@ def __init__( self.mode = mode self.action_timestep = action_timestep self.physics_timestep = physics_timestep - self.rendering_settings = rendering_settings - self.vr_settings = vr_settings self.texture_randomization_freq = self.config.get("texture_randomization_freq", None) self.object_randomization_freq = self.config.get("object_randomization_freq", None) self.object_randomization_idx = 0 self.num_object_randomization_idx = 10 - enable_shadow = self.config.get("enable_shadow", True) + enable_shadow = self.config.get("enable_shadow", False) enable_pbr = self.config.get("enable_pbr", True) texture_scale = self.config.get("texture_scale", 1.0) - if self.rendering_settings is None: - # TODO: We currently only support the optimized renderer due to some issues with obj highlighting. 
- self.rendering_settings = MeshRendererSettings( - enable_shadow=enable_shadow, - enable_pbr=enable_pbr, - msaa=False, - texture_scale=texture_scale, - optimized=self.config.get("optimized_renderer", True), - load_textures=self.config.get("load_texture", True), - ) - - if mode == "vr": - if self.vr_settings is None: - self.vr_settings = VrSettings(use_vr=True) - self.simulator = SimulatorVR( - physics_timestep=physics_timestep, - render_timestep=action_timestep, - image_width=self.config.get("image_width", 128), - image_height=self.config.get("image_height", 128), - vertical_fov=self.config.get("vertical_fov", 90), - device_idx=device_idx, - rendering_settings=self.rendering_settings, - vr_settings=self.vr_settings, - use_pb_gui=use_pb_gui, - ) - else: - self.simulator = Simulator( - mode=mode, - physics_timestep=physics_timestep, - render_timestep=action_timestep, - image_width=self.config.get("image_width", 128), - image_height=self.config.get("image_height", 128), - vertical_fov=self.config.get("vertical_fov", 90), - device_idx=device_idx, - rendering_settings=self.rendering_settings, - use_pb_gui=use_pb_gui, - ) + # TODO: We currently only support the optimized renderer due to some issues with obj highlighting + settings = MeshRendererSettings( + enable_shadow=enable_shadow, enable_pbr=enable_pbr, msaa=False, texture_scale=texture_scale, optimized=False + ) + + self.simulator = Simulator( + mode=mode, + physics_timestep=physics_timestep, + render_timestep=action_timestep, + image_width=self.config.get("image_width", 128), + image_height=self.config.get("image_height", 128), + vertical_fov=self.config.get("vertical_fov", 90), + device_idx=device_idx, + render_to_tensor=render_to_tensor, + rendering_settings=settings, + ) self.load() def reload(self, config_file): """ - Reload another config file. - This allows one to change the configuration on the fly. + Reload another config file + This allows one to change the configuration on the fly :param config_file: new config file path """ @@ -111,8 +90,8 @@ def reload_model(self, scene_id): """ - Reload another scene model. - This allows one to change the scene on the fly. + Reload another scene model + This allows one to change the scene on the fly :param scene_id: new scene_id """ @@ -122,7 +101,7 @@ def reload_model_object_randomization(self): """ - Reload the same model, with the next object randomization random seed. + Reload the same model, with the next object randomization random seed """ if self.object_randomization_freq is None: return @@ -130,14 +109,26 @@ self.simulator.reload() self.load() + def get_next_scene_random_seed(self): + """ + Get the next scene random seed + """ + if self.object_randomization_freq is None: + return None + return self.scene_random_seeds[self.scene_random_seed_idx] + def load(self): """ - Load the scene and robot specified in the config file.
+ Load the scene and robot """ if self.config["scene"] == "empty": scene = EmptyScene() + self.simulator.import_scene( + scene, load_texture=self.config.get("load_texture", True), render_floor_plane=True + ) elif self.config["scene"] == "stadium": scene = StadiumScene() + self.simulator.import_scene(scene, load_texture=self.config.get("load_texture", True)) elif self.config["scene"] == "gibson": scene = StaticIndoorScene( self.config["scene_id"], @@ -148,76 +139,67 @@ def load(self): trav_map_erosion=self.config.get("trav_map_erosion", 2), pybullet_load_texture=self.config.get("pybullet_load_texture", False), ) + self.simulator.import_scene(scene, load_texture=self.config.get("load_texture", True)) elif self.config["scene"] == "igibson": - urdf_file = self.config.get("urdf_file", None) - if urdf_file is None and not self.config.get("online_sampling", True): - urdf_file = "{}_task_{}_{}_{}_fixed_furniture".format( - self.config["scene_id"], - self.config["task"], - self.config["task_id"], - self.config["instance_id"], - ) scene = InteractiveIndoorScene( self.config["scene_id"], - urdf_file=urdf_file, waypoint_resolution=self.config.get("waypoint_resolution", 0.2), num_waypoints=self.config.get("num_waypoints", 10), build_graph=self.config.get("build_graph", False), trav_map_resolution=self.config.get("trav_map_resolution", 0.1), trav_map_erosion=self.config.get("trav_map_erosion", 2), trav_map_type=self.config.get("trav_map_type", "with_obj"), + pybullet_load_texture=self.config.get("pybullet_load_texture", False), texture_randomization=self.texture_randomization_freq is not None, object_randomization=self.object_randomization_freq is not None, object_randomization_idx=self.object_randomization_idx, should_open_all_doors=self.config.get("should_open_all_doors", False), load_object_categories=self.config.get("load_object_categories", None), - not_load_object_categories=self.config.get("not_load_object_categories", None), load_room_types=self.config.get("load_room_types", None), load_room_instances=self.config.get("load_room_instances", None), - merge_fixed_links=self.config.get("merge_fixed_links", True) - and not self.config.get("online_sampling", False), ) - # TODO: Unify the function import_scene and take out of the if-else clauses. 
+ # TODO: Unify the function import_scene and take out of the if-else clauses first_n = self.config.get("_set_first_n_objects", -1) if first_n != -1: scene._set_first_n_objects(first_n) - - self.simulator.import_scene(scene) - - # Get robot config - robot_config = self.config["robot"] - - # Get corresponding robot class - robot_name = robot_config.pop("name") - assert robot_name in REGISTERED_ROBOTS, "Got invalid robot to instantiate: {}".format(robot_name) - - # TODO: Remove if statement once BEHAVIOR robot is refactored - if robot_name == "BehaviorRobot": - robot = REGISTERED_ROBOTS[robot_name](self.simulator) - else: - robot = REGISTERED_ROBOTS[robot_name](**robot_config) - - self.simulator.import_robot(robot) - - # TODO: Remove if statement once BEHAVIOR robot is refactored - if robot_name == "BehaviorRobot": - self.robot_body_id = robot.links["body"].get_body_id() + self.simulator.import_ig_scene(scene) + + if self.config["robot"] == "Turtlebot": + robot = Turtlebot(self.config) + elif self.config["robot"] == "Husky": + robot = Husky(self.config) + elif self.config["robot"] == "Ant": + robot = Ant(self.config) + elif self.config["robot"] == "Humanoid": + robot = Humanoid(self.config) + elif self.config["robot"] == "JR2": + robot = JR2(self.config) + elif self.config["robot"] == "JR2_Kinova": + robot = JR2_Kinova(self.config) + elif self.config["robot"] == "Freight": + robot = Freight(self.config) + elif self.config["robot"] == "Fetch": + robot = Fetch(self.config) + elif self.config["robot"] == "Locobot": + robot = Locobot(self.config) else: - self.robot_body_id = robot.get_body_id() + raise Exception("unknown robot type: {}".format(self.config["robot"])) - self.scene = self.simulator.scene - self.robots = self.simulator.robots + self.scene = scene + self.robots = [robot] + for robot in self.robots: + self.simulator.import_robot(robot) def clean(self): """ - Clean up the environment. + Clean up """ if self.simulator is not None: self.simulator.disconnect() def close(self): """ - Synonymous function with clean. + Synonymous function with clean """ self.clean() @@ -231,12 +213,18 @@ def simulator_step(self): def step(self, action): """ - Overwritten by subclasses. + Overwritten by subclasses """ return NotImplementedError() def reset(self): """ - Overwritten by subclasses. + Overwritten by subclasses """ return NotImplementedError() + + def set_mode(self, mode): + """ + Set simulator mode + """ + self.simulator.mode = mode diff --git a/igibson/envs/igibson_env.py b/igibson/envs/igibson_env.py index 7b4e087b0..38f8e80db 100644 --- a/igibson/envs/igibson_env.py +++ b/igibson/envs/igibson_env.py @@ -1,6 +1,5 @@ import argparse import logging -import os import time from collections import OrderedDict @@ -11,13 +10,10 @@ from igibson.envs.env_base import BaseEnv from igibson.external.pybullet_tools.utils import stable_z_on_aabb -from igibson.robots.behavior_robot import BehaviorRobot from igibson.robots.robot_base import BaseRobot from igibson.sensors.bump_sensor import BumpSensor from igibson.sensors.scan_sensor import ScanSensor from igibson.sensors.vision_sensor import VisionSensor -from igibson.tasks.behavior_task import BehaviorTask -from igibson.tasks.dummy_task import DummyTask from igibson.tasks.dynamic_nav_random_task import DynamicNavRandomTask from igibson.tasks.interactive_nav_random_task import InteractiveNavRandomTask from igibson.tasks.point_nav_fixed_task import PointNavFixedTask @@ -30,7 +26,7 @@ class iGibsonEnv(BaseEnv): """ - iGibson Environment (OpenAI Gym interface). 
+ iGibson Environment (OpenAI Gym interface) """ def __init__( @@ -40,23 +36,19 @@ def __init__( mode="headless", action_timestep=1 / 10.0, physics_timestep=1 / 240.0, - rendering_settings=None, - vr_settings=None, device_idx=0, + render_to_tensor=False, automatic_reset=False, - use_pb_gui=False, ): """ :param config_file: config_file path :param scene_id: override scene_id in config file - :param mode: headless, headless_tensor, gui_interactive, gui_non_interactive, vr + :param mode: headless, gui, iggui :param action_timestep: environment executes action per action_timestep second :param physics_timestep: physics timestep for pybullet - :param rendering_settings: rendering_settings to override the default one - :param vr_settings: vr_settings to override the default one :param device_idx: which GPU to run the simulation and rendering on + :param render_to_tensor: whether to render directly to pytorch tensors :param automatic_reset: whether to automatic reset after an episode finishes - :param use_pb_gui: concurrently display the interactive pybullet gui (for debugging) """ super(iGibsonEnv, self).__init__( config_file=config_file, @@ -64,16 +56,14 @@ def __init__( mode=mode, action_timestep=action_timestep, physics_timestep=physics_timestep, - rendering_settings=rendering_settings, - vr_settings=vr_settings, device_idx=device_idx, - use_pb_gui=use_pb_gui, + render_to_tensor=render_to_tensor, ) self.automatic_reset = automatic_reset def load_task_setup(self): """ - Load task setup. + Load task setup """ self.initial_pos_z_offset = self.config.get("initial_pos_z_offset", 0.1) # s = 0.5 * G * (t ** 2) @@ -93,9 +83,7 @@ def load_task_setup(self): self.object_randomization_freq = self.config.get("object_randomization_freq", None) # task - if "task" not in self.config: - self.task = DummyTask(self) - elif self.config["task"] == "point_nav_fixed": + if self.config["task"] == "point_nav_fixed": self.task = PointNavFixedTask(self) elif self.config["task"] == "point_nav_random": self.task = PointNavRandomTask(self) @@ -108,32 +96,17 @@ def load_task_setup(self): elif self.config["task"] == "room_rearrangement": self.task = RoomRearrangementTask(self) else: - try: - import bddl - - with open(os.path.join(os.path.dirname(bddl.__file__), "activity_manifest.txt")) as f: - all_activities = [line.strip() for line in f.readlines()] - - if self.config["task"] in all_activities: - self.task = BehaviorTask(self) - else: - raise Exception("Invalid task: {}".format(self.config["task"])) - except ImportError: - raise Exception("bddl is not available.") + self.task = None def build_obs_space(self, shape, low, high): """ - Helper function that builds individual observation spaces. - - :param shape: shape of the space - :param low: lower bounds of the space - :param high: higher bounds of the space + Helper function that builds individual observation spaces """ return gym.spaces.Box(low=low, high=high, shape=shape, dtype=np.float32) def load_observation_space(self): """ - Load observation space. 
+ Load observation space """ self.output = self.config["output"] self.image_width = self.config.get("image_width", 128) @@ -215,10 +188,6 @@ def load_observation_space(self): if "bump" in self.output: observation_space["bump"] = gym.spaces.Box(low=0.0, high=1.0, shape=(1,)) sensors["bump"] = BumpSensor(self) - if "proprioception" in self.output: - observation_space["proprioception"] = self.build_obs_space( - shape=(self.robots[0].proprioception_dim,), low=-np.inf, high=np.inf - ) if len(vision_modalities) > 0: sensors["vision"] = VisionSensor(self, vision_modalities) @@ -231,13 +200,13 @@ def load_observation_space(self): def load_action_space(self): """ - Load action space. + Load action space """ self.action_space = self.robots[0].action_space def load_miscellaneous_variables(self): """ - Load miscellaneous variables for book keeping. + Load miscellaneous variables for book keeping """ self.current_step = 0 self.collision_step = 0 @@ -246,7 +215,7 @@ def load_miscellaneous_variables(self): def load(self): """ - Load environment. + Load environment """ super(iGibsonEnv, self).load() self.load_task_setup() @@ -254,10 +223,11 @@ def load(self): self.load_action_space() self.load_miscellaneous_variables() - def get_state(self): + def get_state(self, collision_links=[]): """ - Get the current observation. + Get the current observation + :param collision_links: collisions from last physics timestep :return: observation as a dictionary """ state = OrderedDict() @@ -273,24 +243,22 @@ def get_state(self): state[modality] = scan_obs[modality] if "bump" in self.sensors: state["bump"] = self.sensors["bump"].get_obs(self) - if "proprioception" in self.sensors: - state["proprioception"] = np.array(self.robots[0].get_proprioception()) return state def run_simulation(self): """ - Run simulation for one action timestep (same as one render timestep in Simulator class). + Run simulation for one action timestep (same as one render timestep in Simulator class) - :return: a list of collisions from the last physics timestep + :return: collision_links: collisions from last physics timestep """ self.simulator_step() - collision_links = list(p.getContactPoints(bodyA=self.robot_body_id)) + collision_links = list(p.getContactPoints(bodyA=self.robots[0].robot_ids[0])) return self.filter_collision_links(collision_links) def filter_collision_links(self, collision_links): """ - Filter out collisions that should be ignored. + Filter out collisions that should be ignored :param collision_links: original collisions, a list of collisions :return: filtered collisions @@ -306,23 +274,22 @@ def filter_collision_links(self, collision_links): continue # ignore self collision with robot link a (body b is also robot itself) - if item[2] == self.robot_body_id and item[4] in self.collision_ignore_link_a_ids: + if item[2] == self.robots[0].robot_ids[0] and item[4] in self.collision_ignore_link_a_ids: continue new_collision_links.append(item) return new_collision_links def populate_info(self, info): """ - Populate info dictionary with any useful information. - - :param info: the info dictionary to populate + Populate info dictionary with any useful information """ info["episode_length"] = self.current_step info["collision_step"] = self.collision_step def step(self, action): """ - Apply robot's action and return the next state, reward, done and info, + Apply robot's action. 
+ Returns the next state, reward, done and info, following OpenAI Gym's convention :param action: robot actions @@ -338,7 +305,7 @@ def step(self, action): self.collision_links = collision_links self.collision_step += int(len(collision_links) > 0) - state = self.get_state() + state = self.get_state(collision_links) info = {} reward, info = self.task.get_reward(self, collision_links, action, info) done, info = self.task.get_termination(self, collision_links, action, info) @@ -353,10 +320,10 @@ def step(self, action): def check_collision(self, body_id): """ - Check whether the given body_id has collision after one simulator step + Check with the given body_id has any collision after one simulator step :param body_id: pybullet body id - :return: whether the given body_id has collision + :return: whether the given body_id has no collision """ self.simulator_step() collisions = list(p.getContactPoints(bodyA=body_id)) @@ -365,11 +332,11 @@ def check_collision(self, body_id): for item in collisions: logging.debug("bodyA:{}, bodyB:{}, linkA:{}, linkB:{}".format(item[1], item[2], item[3], item[4])) - return len(collisions) > 0 + return len(collisions) == 0 def set_pos_orn_with_z_offset(self, obj, pos, orn=None, offset=None): """ - Reset position and orientation for the robot or the object. + Reset position and orientation for the robot or the object :param obj: an instance of robot or object :param pos: position @@ -383,7 +350,7 @@ def set_pos_orn_with_z_offset(self, obj, pos, orn=None, offset=None): offset = self.initial_pos_z_offset is_robot = isinstance(obj, BaseRobot) - body_id = obj.get_body_id() + body_id = obj.robot_ids[0] if is_robot else obj.body_id # first set the correct orientation obj.set_position_orientation(pos, quatToXYZW(euler2quat(*orn), "wxyz")) # compute stable z based on this orientation @@ -394,28 +361,28 @@ def set_pos_orn_with_z_offset(self, obj, pos, orn=None, offset=None): def test_valid_position(self, obj, pos, orn=None): """ - Test if the robot or the object can be placed with no collision. + Test if the robot or the object can be placed with no collision :param obj: an instance of robot or object :param pos: position :param orn: orientation - :return: whether the position is valid + :return: validity """ is_robot = isinstance(obj, BaseRobot) self.set_pos_orn_with_z_offset(obj, pos, orn) if is_robot: - obj.reset() + obj.robot_specific_reset() obj.keep_still() - body_id = obj.get_body_id() + body_id = obj.robot_ids[0] if is_robot else obj.body_id has_collision = self.check_collision(body_id) - return not has_collision + return has_collision def land(self, obj, pos, orn): """ - Land the robot or the object onto the floor, given a valid position and orientation. + Land the robot or the object onto the floor, given a valid position and orientation :param obj: an instance of robot or object :param pos: position @@ -426,10 +393,10 @@ def land(self, obj, pos, orn): self.set_pos_orn_with_z_offset(obj, pos, orn) if is_robot: - obj.reset() + obj.robot_specific_reset() obj.keep_still() - body_id = obj.get_body_id() + body_id = obj.robot_ids[0] if is_robot else obj.body_id land_success = False # land for maximum 1 second, should fall down ~5 meters @@ -441,15 +408,15 @@ def land(self, obj, pos, orn): break if not land_success: - logging.warning("Object failed to land.") + print("WARNING: Failed to land") if is_robot: - obj.reset() + obj.robot_specific_reset() obj.keep_still() def reset_variables(self): """ - Reset bookkeeping variables for the next new episode. 
+ Reset bookkeeping variables for the next new episode """ self.current_episode += 1 self.current_step = 0 @@ -458,9 +425,9 @@ def reset_variables(self): def randomize_domain(self): """ - Randomize domain. - Object randomization loads new object models with the same poses. - Texture randomization loads new materials and textures for the same object models. + Domain randomization + Object randomization loads new object models with the same poses + Texture randomization loads new materials and textures for the same object models """ if self.object_randomization_freq is not None: if self.current_episode % self.object_randomization_freq == 0: @@ -471,13 +438,14 @@ def randomize_domain(self): def reset(self): """ - Reset episode. + Reset episode """ self.randomize_domain() - # Move robot away from the scene. + # move robot away from the scene self.robots[0].set_position([100.0, 100.0, 100.0]) - self.task.reset(self) - self.simulator.sync(force_sync=True) + self.task.reset_scene(self) + self.task.reset_agent(self) + self.simulator.sync() state = self.get_state() self.reset_variables() @@ -490,7 +458,7 @@ def reset(self): parser.add_argument( "--mode", "-m", - choices=["headless", "headless_tensor", "gui_interactive", "gui_non_interactive"], + choices=["headless", "gui", "iggui"], default="headless", help="which mode for simulation (default: headless)", ) diff --git a/igibson/envs/igibson_ray_env.py b/igibson/envs/igibson_ray_env.py index e418af10c..136820281 100644 --- a/igibson/envs/igibson_ray_env.py +++ b/igibson/envs/igibson_ray_env.py @@ -28,7 +28,7 @@ def get_current_step(self): parser.add_argument( "--mode", "-m", - choices=["headless", "headless_tensor", "gui_interactive", "gui_non_interactive"], + choices=["headless", "gui", "iggui"], default="headless", help="which mode for simulation (default: headless)", ) diff --git a/igibson/envs/igibson_rllib_env.py b/igibson/envs/igibson_rllib_env.py index bdf3eec62..7b8acc25a 100644 --- a/igibson/envs/igibson_rllib_env.py +++ b/igibson/envs/igibson_rllib_env.py @@ -135,7 +135,7 @@ def __init__(self, env_config): parser.add_argument( "--config", "-c", - default=os.path.join(igibson.root_path, "examples", "configs", "turtlebot_nav.yaml"), + default=os.path.join(igibson.root_path, "examples", "configs", "turtlebot_point_nav.yaml"), help="which config file to use [default: use yaml files in examples/configs]", ) parser.add_argument("--ray_mode", default="train", help="Whether to run ray in train or test mode") @@ -152,7 +152,7 @@ def __init__(self, env_config): parser.add_argument( "--mode", "-m", - choices=["headless", "headless_tensor", "gui_interactive", "gui_non_interactive"], + choices=["headless", "gui", "iggui"], default="headless", help="which mode for simulation (default: headless)", ) diff --git a/igibson/envs/parallel_env.py b/igibson/envs/parallel_env.py index ae25ecd91..c9a901b4d 100644 --- a/igibson/envs/parallel_env.py +++ b/igibson/envs/parallel_env.py @@ -243,7 +243,7 @@ def _worker(self, conn, env_constructor, flatten=False): if __name__ == "__main__": - config_filename = os.path.join(os.path.dirname(igibson.__file__), "..", "tests", "test.yaml") + config_filename = os.path.join(os.path.dirname(igibson.__file__), "test", "test.yaml") def load_env(): return iGibsonEnv(config_file=config_filename, mode="headless") diff --git a/igibson/examples/README.md b/igibson/examples/README.md deleted file mode 100644 index 685523a5d..000000000 --- a/igibson/examples/README.md +++ /dev/null @@ -1,19 +0,0 @@ -### Code Examples - -The following 
examples illustrate the use of iGibson. - -If you are interested in just getting started as an end-user, you only need check out `./environments`. - -If you are looking for examples of BEHAVIOR, the benchmark of household activities that uses iGibson, please check the BEHAVIOR repository at https://github.com/StanfordVL/behavior. - -- environments: how to instantiate iGibson environments with interactive or static scenes, optionally with a scene selector. -- learning: how to train RL policies for robot navigation using stable baselines 3, and how to save and replay demos of agents for imitation learning. -- objects: how to create, load, and place objects to predefined locations or using a logic sampler (e.g. onTop(A, B)), how to change texture as a function of the temperature, and how to generate the minimum volume bounding boxes of objects. -- object_states: how to change various objects states, including dusty, stained, (water sources) toggled on, (cleaning tool) soaked, sliced, and temprature, and how to save and reload object states. -- observations: how to generate different observation modalities such as RGB, depth, LiDAR, segmentation, etc. -- renderer: how to use the renderer directly, without the physics engine. -- robots: how to (keyboard) control robots with differential drive controllers, IK controllers and sampling-based motion planners. -- ros: how to run ROS with iGibson as if it is the real world. -- scenes: how to load interactive and non-interactive scenes, how to use domain randomization (of object models and/or texture), and how to create a tour video of the scenes. -- vr: how to use iGibson with VR. -- web_ui: how to start a web server that hosts iGibson environments. diff --git a/igibson/examples/learning/__init__.py b/igibson/examples/behavior/__init__.py similarity index 100% rename from igibson/examples/learning/__init__.py rename to igibson/examples/behavior/__init__.py diff --git a/igibson/examples/behavior/behavior_debug_replay.py b/igibson/examples/behavior/behavior_debug_replay.py new file mode 100644 index 000000000..b18148eda --- /dev/null +++ b/igibson/examples/behavior/behavior_debug_replay.py @@ -0,0 +1,199 @@ +""" +Debugging script to bypass taksnet +""" + +import argparse +import datetime +import os + +import igibson +from igibson.metrics.agent import AgentMetric +from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings +from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings +from igibson.robots.behavior_robot import BehaviorRobot +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator +from igibson.utils.ig_logging import IGLogReader, IGLogWriter + +POST_TASK_STEPS = 200 +PHYSICS_WARMING_STEPS = 200 + + +def parse_args(): + parser = argparse.ArgumentParser(description="Run and collect an ATUS demo") + parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log to replay") + parser.add_argument( + "--vr_replay_log_path", type=str, help="Path (and filename) of file to save replay to (for debugging)" + ) + parser.add_argument( + "--frame_save_path", + type=str, + help="Path to save frames (frame number added automatically, as well as .jpg extension)", + ) + parser.add_argument( + "--disable_save", + action="store_true", + help="Whether to disable saving log of replayed trajectory, used for validation.", + ) + parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.") + parser.add_argument( + "--mode", + 
type=str, + choices=["headless", "vr", "simple"], + help="Whether to disable replay through VR and use iggui instead.", + ) + return parser.parse_args() + + +def replay_demo( + in_log_path, + out_log_path=None, + disable_save=False, + frame_save_path=None, + verbose=True, + mode="headless", + start_callbacks=[], + step_callbacks=[], + end_callbacks=[], + profile=False, +): + """ + Replay a BEHAVIOR demo. + + Note that this returns, but does not check for determinism. Use safe_replay_demo to assert for determinism + when using in scenarios where determinism is important. + + @param in_log_path: the path of the BEHAVIOR demo log to replay. + @param out_log_path: the path of the new BEHAVIOR demo log to save from the replay. + @param frame_save_path: the path to save frame images to. None to disable frame image saving. + @param mode: which rendering mode ("headless", "simple", "vr"). In simple mode, the demo will be replayed with simple robot view. + @param disable_save: Whether saving the replay as a BEHAVIOR demo log should be disabled. + @param profile: Whether the replay should be profiled, with profiler output to stdout. + @param start_callback: A callback function that will be called immediately before starting to replay steps. Should + take a single argument, an iGBEHAVIORActivityInstance. + @param step_callback: A callback function that will be called immediately following each replayed step. Should + take a single argument, an iGBEHAVIORActivityInstance. + @param end_callback: A callback function that will be called when replay has finished. Should take a single + argument, an iGBEHAVIORActivityInstance. + @return if disable_save is True, returns None. Otherwise, returns a boolean indicating if replay was deterministic. + """ + # HDR files for PBR rendering + hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr") + hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr") + light_modulation_map_filename = os.path.join( + igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png" + ) + background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg") + + # VR rendering settings + vr_rendering_settings = MeshRendererSettings( + optimized=True, + fullscreen=False, + env_texture_filename=hdr_texture, + env_texture_filename2=hdr_texture2, + env_texture_filename3=background_texture, + light_modulation_map_filename=light_modulation_map_filename, + enable_shadow=True, + enable_pbr=True, + msaa=False, + light_dimming_factor=1.0, + ) + + # Check mode + assert mode in ["headless", "vr", "simple"] + + # Initialize settings to save action replay frames + vr_settings = VrSettings(config_str=IGLogReader.read_metadata_attr(in_log_path, "/metadata/vr_settings")) + vr_settings.set_frame_save_path(frame_save_path) + + physics_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/physics_timestep") + render_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/render_timestep") + filter_objects = IGLogReader.read_metadata_attr(in_log_path, "/metadata/filter_objects") + + scene_id = "Rs_int" + # VR system settings + s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True)) + + scene = InteractiveIndoorScene( + scene_id, load_object_categories=["walls", "floors", "ceilings"], load_room_types=["kitchen"] + ) + + vr_agent = BehaviorRobot(s) + s.import_ig_scene(scene) + s.import_behavior_robot(vr_agent) + 
s.register_main_vr_robot(vr_agent) + vr_agent.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1]) + + log_reader = IGLogReader(in_log_path, log_status=False) + + log_writer = None + if not disable_save: + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + if out_log_path == None: + out_log_path = "{}_{}_replay.hdf5".format(scene_id, timestamp) + + log_writer = IGLogWriter( + s, + log_filepath=out_log_path, + store_vr=False, + vr_robot=vr_agent, + profiling_mode=profile, + filter_objects=filter_objects, + ) + log_writer.set_up_data_storage() + + for callback in start_callbacks: + callback(vr_agent, log_reader) + + task_done = False + satisfied_predicates = {} + while log_reader.get_data_left_to_read(): + + s.step(print_stats=profile) + + # Set camera each frame + if mode == "vr": + log_reader.set_replay_camera(s) + + for callback in step_callbacks: + callback(vr_agent, log_reader) + + # Get relevant VR action data and update VR agent + vr_agent.apply_action(log_reader.get_agent_action("vr_robot")) + + if not disable_save: + log_writer.process_frame() + + demo_statistics = {} + for callback in end_callbacks: + callback(vr_agent, log_reader) + + s.disconnect() + + demo_statistics = { + "task_done": task_done, + "satisfied_predicates": satisfied_predicates, + "total_frame_num": log_reader.total_frame_num, + } + return demo_statistics + + +def main(): + args = parse_args() + agent_metrics = AgentMetric() + replay_demo( + args.vr_log_path, + out_log_path=args.vr_replay_log_path, + step_callbacks=[agent_metrics.step_callback], + disable_save=args.disable_save, + frame_save_path=args.frame_save_path, + mode=args.mode, + profile=args.profile, + ) + import pdb + + pdb.set_trace() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/behavior/behavior_debug_vr.py b/igibson/examples/behavior/behavior_debug_vr.py new file mode 100644 index 000000000..0e60af2d8 --- /dev/null +++ b/igibson/examples/behavior/behavior_debug_vr.py @@ -0,0 +1,104 @@ +""" +Debugging script to bypass taksnet +""" + +import copy +import datetime +import os + +import numpy as np + +import igibson +from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings +from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings +from igibson.robots.behavior_robot import BehaviorRobot +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator +from igibson.utils.ig_logging import IGLogWriter + +POST_TASK_STEPS = 200 +PHYSICS_WARMING_STEPS = 200 + + +def main(): + collect_demo(scene_id="Rs_int") + + +def collect_demo(scene_id, vr_log_path=None, disable_save=False, no_vr=False, profile=False): + # HDR files for PBR rendering + hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr") + hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr") + light_modulation_map_filename = os.path.join( + igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png" + ) + background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg") + + # VR rendering settings + vr_rendering_settings = MeshRendererSettings( + optimized=True, + fullscreen=False, + env_texture_filename=hdr_texture, + env_texture_filename2=hdr_texture2, + env_texture_filename3=background_texture, + light_modulation_map_filename=light_modulation_map_filename, + enable_shadow=True, + enable_pbr=True, + msaa=False, + light_dimming_factor=1.0, + ) + + s = 
Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True)) + + scene = InteractiveIndoorScene( + scene_id, load_object_categories=["walls", "floors", "ceilings"], load_room_types=["kitchen"] + ) + + vr_agent = BehaviorRobot(s) + s.import_ig_scene(scene) + s.import_behavior_robot(vr_agent) + s.register_main_vr_robot(vr_agent) + vr_agent.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1]) + + # VR system settings + + log_writer = None + if not disable_save: + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + if vr_log_path is None: + vr_log_path = "{}_{}_{}.hdf5".format("behavior_dummy_demo", scene_id, timestamp) + log_writer = IGLogWriter( + s, + log_filepath=vr_log_path, + task=None, + store_vr=False if no_vr else True, + vr_robot=vr_agent, + profiling_mode=profile, + filter_objects=False, + ) + log_writer.set_up_data_storage() + + physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) + + steps = 2000 + for _ in range(steps): + + s.step() + action = s.gen_vr_robot_action() + if steps < physics_warming_steps: + action = np.zeros_like(action) + + vr_agent.apply_action(action) + + if log_writer and not disable_save: + log_writer.process_frame() + + steps += 1 + + if log_writer and not disable_save: + log_writer.end_log_session() + + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/behavior/behavior_demo_batch.py b/igibson/examples/behavior/behavior_demo_batch.py new file mode 100644 index 000000000..3ff53d0db --- /dev/null +++ b/igibson/examples/behavior/behavior_demo_batch.py @@ -0,0 +1,83 @@ +""" +BEHAVIOR demo batch analysis script +""" + +import json +import logging +import os +from pathlib import Path + +import bddl +import pandas as pd + +from igibson.examples.behavior.behavior_demo_replay import replay_demo + + +def behavior_demo_batch( + demo_root, log_manifest, out_dir, get_callbacks_callback, skip_existing=True, save_frames=False +): + """ + Execute replay analysis functions (provided through callbacks) on a batch of BEHAVIOR demos. + + @param demo_root: Directory containing the demo files listed in the manifests. + @param log_manifest: The manifest file containing list of BEHAVIOR demos to batch over. + @param out_dir: Directory to store results in. + @param get_callbacks_callback: A function that will be called for each demo that needs to return + a four-tuple: (start_callbacks, step_callbacks, end_callbacks, data_callbacks). Each of the + the first three callback function sets need to be compatible with the behavior_demo_replay + API and will be used for this purpose for that particular demo. The data callbacks should + take no arguments and return a dictionary to be included in the demo's replay data that will + be saved in the end. + @param skip_existing: Whether demos with existing output logs should be skipped. + @param save_frames: Whether the demo's frames should be saved alongside statistics. 
+ """ + logger = logging.getLogger() + logger.disabled = True + + bddl.set_backend("iGibson") + + demo_list = pd.read_csv(log_manifest) + + for idx, demo in enumerate(demo_list["demos"]): + if "replay" in demo: + continue + + demo_name = os.path.splitext(demo)[0] + demo_path = os.path.join(demo_root, demo) + replay_path = os.path.join(out_dir, demo_name + "_replay.hdf5") + log_path = os.path.join(out_dir, demo_name + ".json") + + if skip_existing and os.path.exists(log_path): + print("Skipping existing demo: {}, {} out of {}".format(demo, idx, len(demo_list["demos"]))) + continue + + print("Replaying demo: {}, {} out of {}".format(demo, idx, len(demo_list["demos"]))) + + curr_frame_save_path = None + if save_frames: + curr_frame_save_path = os.path.join(out_dir, demo_name + ".mp4") + + try: + start_callbacks, step_callbacks, end_callbacks, data_callbacks = get_callbacks_callback() + demo_information = replay_demo( + in_log_path=demo_path, + out_log_path=replay_path, + frame_save_path=curr_frame_save_path, + start_callbacks=start_callbacks, + step_callbacks=step_callbacks, + end_callbacks=end_callbacks, + mode="headless", + verbose=False, + ) + demo_information["failed"] = False + demo_information["filename"] = Path(demo).name + + for callback in data_callbacks: + demo_information.update(callback()) + + except Exception as e: + print("Demo failed withe error: ", e) + demo_information = {"demo_id": Path(demo).name, "failed": True, "failure_reason": str(e)} + + with open(log_path, "w") as file: + json.dump(demo_information, file) diff --git a/igibson/examples/behavior/behavior_demo_checkpoints.py b/igibson/examples/behavior/behavior_demo_checkpoints.py new file mode 100644 index 000000000..727f59090 --- /dev/null +++ b/igibson/examples/behavior/behavior_demo_checkpoints.py @@ -0,0 +1,34 @@ +"""Save checkpoints from a BEHAVIOR demo.""" +import os + +import bddl + +import igibson +from igibson.examples.demo.vr_demos.atus.behavior_demo_replay import safe_replay_demo +from igibson.utils.checkpoint_utils import save_checkpoint + +bddl.set_backend("iGibson") + + +def create_checkpoints(demo_file, checkpoint_directory, checkpoint_every_n_steps): + # Create a step callback function to feed replay steps into checkpoints. 
+ def step_callback(igbhvr_act_inst): + if ( + not igbhvr_act_inst.current_success + and igbhvr_act_inst.simulator.frame_count % checkpoint_every_n_steps == 0 + ): + save_checkpoint(igbhvr_act_inst.simulator, checkpoint_directory) + + safe_replay_demo(demo_file, mode="headless", step_callback=step_callback) + + +def main(): + demo_file = os.path.join(igibson.ig_dataset_path, "tests", "storing_food_0_Rs_int_2021-05-31_11-49-30.hdf5") + checkpoint_directory = "checkpoints" + if not os.path.exists(checkpoint_directory): + os.mkdir(checkpoint_directory) + create_checkpoints(demo_file, checkpoint_directory, 50) + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/behavior/behavior_demo_collection.py b/igibson/examples/behavior/behavior_demo_collection.py new file mode 100644 index 000000000..613f277ee --- /dev/null +++ b/igibson/examples/behavior/behavior_demo_collection.py @@ -0,0 +1,218 @@ +""" +Main BEHAVIOR demo collection entrypoint +""" + +import argparse +import copy +import datetime +import os + +import bddl +import numpy as np + +import igibson +from igibson.activity.activity_base import iGBEHAVIORActivityInstance +from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings +from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings +from igibson.simulator import Simulator +from igibson.utils.ig_logging import IGLogWriter + +POST_TASK_STEPS = 200 +PHYSICS_WARMING_STEPS = 200 + + +def parse_args(): + scene_choices = [ + "Beechwood_0_int", + "Beechwood_1_int", + "Benevolence_0_int", + "Benevolence_1_int", + "Benevolence_2_int", + "Ihlen_0_int", + "Ihlen_1_int", + "Merom_0_int", + "Merom_1_int", + "Pomaria_0_int", + "Pomaria_1_int", + "Pomaria_2_int", + "Rs_int", + "Wainscott_0_int", + "Wainscott_1_int", + ] + + task_id_choices = [0, 1] + parser = argparse.ArgumentParser(description="Run and collect an ATUS demo") + parser.add_argument( + "--task", type=str, required=True, nargs="?", help="Name of ATUS activity matching parent folder in bddl." + ) + parser.add_argument( + "--task_id", + type=int, + required=True, + choices=task_id_choices, + nargs="?", + help="BDDL integer ID, matching suffix of bddl.", + ) + parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log") + parser.add_argument( + "--scene", type=str, choices=scene_choices, nargs="?", help="Scene name/ID matching iGibson interactive scenes." + ) + parser.add_argument("--disable_save", action="store_true", help="Whether to disable saving logfiles.") + parser.add_argument( + "--disable_scene_cache", action="store_true", help="Whether to disable using pre-initialized scene caches." + ) + parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.") + parser.add_argument( + "--no_vr", action="store_true", help="Whether to turn off VR recording and save random actions." 
+ ) + parser.add_argument("--max_steps", type=int, default=-1, help="Maximum number of steps to record before stopping.") + return parser.parse_args() + + +def main(): + args = parse_args() + bddl.set_backend("iGibson") + collect_demo( + args.task, + args.task_id, + args.scene, + args.vr_log_path, + args.disable_save, + args.max_steps, + args.no_vr, + args.disable_scene_cache, + args.profile, + ) + + +def collect_demo( + task, + task_id, + scene, + vr_log_path=None, + disable_save=False, + max_steps=-1, + no_vr=False, + disable_scene_cache=False, + profile=False, +): + # HDR files for PBR rendering + hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr") + hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr") + light_modulation_map_filename = os.path.join( + igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png" + ) + background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg") + + # VR rendering settings + vr_rendering_settings = MeshRendererSettings( + optimized=True, + fullscreen=False, + env_texture_filename=hdr_texture, + env_texture_filename2=hdr_texture2, + env_texture_filename3=background_texture, + light_modulation_map_filename=light_modulation_map_filename, + enable_shadow=True, + enable_pbr=True, + msaa=False, + light_dimming_factor=1.0, + ) + + # VR system settings + mode = "headless" if no_vr else "vr" + s = Simulator( + mode=mode, + rendering_settings=vr_rendering_settings, + vr_settings=VrSettings(use_vr=True), + physics_timestep=1 / 300.0, + render_timestep=1 / 30.0, + ) + igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) + + scene_kwargs = None + online_sampling = True + + if not disable_scene_cache: + scene_kwargs = { + "urdf_file": "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id), + } + online_sampling = False + + igbhvr_act_inst.initialize_simulator( + simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling + ) + vr_agent = igbhvr_act_inst.simulator.robots[0] + + if not no_vr: + vr_cs = VrConditionSwitcher( + igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction + ) + + log_writer = None + if not disable_save: + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + if vr_log_path is None: + vr_log_path = "{}_{}_{}_{}.hdf5".format(task, task_id, scene, timestamp) + log_writer = IGLogWriter( + s, + log_filepath=vr_log_path, + task=igbhvr_act_inst, + store_vr=False if no_vr else True, + vr_robot=vr_agent, + profiling_mode=profile, + filter_objects=True, + ) + log_writer.set_up_data_storage() + + satisfied_predicates_cached = {} + post_task_steps = copy.deepcopy(POST_TASK_STEPS) + physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) + + steps = 0 + while max_steps < 0 or steps < max_steps: + igbhvr_act_inst.simulator.step(print_stats=profile) + task_done, satisfied_predicates = igbhvr_act_inst.check_success() + + if no_vr: + if steps < 2: + action = np.zeros((28,)) + action[19] = 1 + action[27] = 1 + else: + action = np.random.uniform(-0.01, 0.01, size=(28,)) + else: + action = igbhvr_act_inst.simulator.gen_vr_robot_action() + if steps < physics_warming_steps: + action = np.zeros_like(action) + + vr_agent.apply_action(action) + + if not no_vr: + if satisfied_predicates != satisfied_predicates_cached: + vr_cs.refresh_condition(switch=False) + satisfied_predicates_cached = satisfied_predicates + 
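+            # The overlay_toggle controller events let the demonstrator refresh the goal-condition overlay (right controller) or show/hide it (left controller).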
+ if igbhvr_act_inst.simulator.query_vr_event("right_controller", "overlay_toggle"): + vr_cs.refresh_condition() + + if igbhvr_act_inst.simulator.query_vr_event("left_controller", "overlay_toggle"): + vr_cs.toggle_show_state() + + if log_writer and not disable_save: + log_writer.process_frame() + + if task_done: + post_task_steps -= 1 + if post_task_steps == 0: + break + + steps += 1 + + if log_writer and not disable_save: + log_writer.end_log_session() + + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/behavior/behavior_demo_metrics_batch.py b/igibson/examples/behavior/behavior_demo_metrics_batch.py new file mode 100644 index 000000000..2837dcb83 --- /dev/null +++ b/igibson/examples/behavior/behavior_demo_metrics_batch.py @@ -0,0 +1,35 @@ +import argparse + +from igibson.examples.behavior.behavior_demo_batch import behavior_demo_batch +from igibson.metrics.agent import AgentMetric +from igibson.metrics.disarrangement import KinematicDisarrangement, LogicalDisarrangement +from igibson.metrics.gaze import GazeMetric +from igibson.metrics.task import TaskMetric + + +def parse_args(): + parser = argparse.ArgumentParser(description="Collect metrics from BEHAVIOR demos in manifest.") + parser.add_argument("demo_root", type=str, help="Directory containing demos listed in the manifest.") + parser.add_argument("log_manifest", type=str, help="Plain text file consisting of list of demos to replay.") + parser.add_argument("out_dir", type=str, help="Directory to store results in.") + return parser.parse_args() + + +def main(): + args = parse_args() + + def get_metrics_callbacks(): + metrics = [KinematicDisarrangement(), LogicalDisarrangement(), AgentMetric(), GazeMetric(), TaskMetric()] + + return ( + [metric.start_callback for metric in metrics], + [metric.step_callback for metric in metrics], + [metric.end_callback for metric in metrics], + [metric.gather_results for metric in metrics], + ) + + behavior_demo_batch(args.demo_root, args.log_manifest, args.out_dir, get_metrics_callbacks) + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/learning/demo_replaying_example.py b/igibson/examples/behavior/behavior_demo_replay.py similarity index 62% rename from igibson/examples/learning/demo_replaying_example.py rename to igibson/examples/behavior/behavior_demo_replay.py index ef86267d4..5f0e374da 100644 --- a/igibson/examples/learning/demo_replaying_example.py +++ b/igibson/examples/behavior/behavior_demo_replay.py @@ -1,36 +1,24 @@ +""" +Main BEHAVIOR demo replay entrypoint +""" + import argparse import datetime -import logging import os import pprint +import bddl import h5py import numpy as np import igibson -from igibson.envs.igibson_env import iGibsonEnv +from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings +from igibson.simulator import Simulator from igibson.utils.git_utils import project_git_info from igibson.utils.ig_logging import IGLogReader, IGLogWriter -from igibson.utils.utils import parse_config, parse_str_config - - -def main(): - """ - Example of how to replay a previously recorded demo of a task - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - args = parse_args() - replay_demo( - args.log_path, - out_log_path=args.replay_log_path, - disable_save=args.disable_save, - frame_save_path=args.frame_save_path, - mode=args.mode, - 
profile=args.profile, - config_file=args.config, - ) +from igibson.utils.utils import parse_str_config def verify_determinism(in_log_path, out_log_path): @@ -40,22 +28,18 @@ def verify_determinism(in_log_path, out_log_path): for attribute in original_file["physics_data"][obj]: is_close = np.isclose( original_file["physics_data"][obj][attribute], new_file["physics_data"][obj][attribute] - ) - is_deterministic = is_deterministic and is_close.all() - if not is_close.all(): - logging.info( - "Mismatch for obj {} with mismatched attribute {} starting at timestep {}".format( - obj, attribute, np.where(is_close == False)[0][0] - ) - ) + ).all() + is_deterministic = is_deterministic and is_close + if not is_close: + print("Mismatch for obj {} with mismatched attribute {}".format(obj, attribute)) return bool(is_deterministic) def parse_args(): parser = argparse.ArgumentParser(description="Run and collect an ATUS demo") - parser.add_argument("--log_path", type=str, required=True, help="Path (and filename) of log to replay") + parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log to replay") parser.add_argument( - "--replay_log_path", type=str, help="Path (and filename) of file to save replay to (for debugging)" + "--vr_replay_log_path", type=str, help="Path (and filename) of file to save replay to (for debugging)" ) parser.add_argument( "--frame_save_path", @@ -71,14 +55,8 @@ def parse_args(): parser.add_argument( "--mode", type=str, - default="gui_non_interactive", - choices=["headless", "headless_tensor", "vr", "gui_non_interactive"], - help="Mode to run simulator in", - ) - parser.add_argument( - "--config", - help="which config file to use [default: use yaml files in examples/configs]", - default=os.path.join(igibson.example_config_path, "behavior_vr.yaml"), + choices=["headless", "vr", "simple"], + help="Whether to disable replay through VR and use iggui instead.", ) return parser.parse_args() @@ -90,37 +68,29 @@ def replay_demo( frame_save_path=None, verbose=True, mode="headless", - config_file=os.path.join(igibson.example_config_path, "behavior_vr.yaml"), start_callbacks=[], step_callbacks=[], end_callbacks=[], profile=False, - image_size=(1280, 720), - use_pb_gui=False, ): """ - Replay a demo of a task. + Replay a BEHAVIOR demo. Note that this returns, but does not check for determinism. Use safe_replay_demo to assert for determinism when using in scenarios where determinism is important. @param in_log_path: the path of the BEHAVIOR demo log to replay. @param out_log_path: the path of the new BEHAVIOR demo log to save from the replay. - @param disable_save: Whether saving the replay as a BEHAVIOR demo log should be disabled. @param frame_save_path: the path to save frame images to. None to disable frame image saving. - @param verbose: Whether to print out git diff in detail - @param mode: which rendering mode ("headless", "headless_tensor", "gui_non_interactive", "vr"). In gui_non_interactive - mode, the demo will be replayed with simple robot view. - @param config_file: environment config file + @param mode: which rendering mode ("headless", "simple", "vr"). In simple mode, the demo will be replayed with simple robot view. + @param disable_save: Whether saving the replay as a BEHAVIOR demo log should be disabled. + @param profile: Whether the replay should be profiled, with profiler output to stdout. @param start_callback: A callback function that will be called immediately before starting to replay steps. 
Should - take two arguments: iGibsonEnv and IGLogReader + take a single argument, an iGBEHAVIORActivityInstance. @param step_callback: A callback function that will be called immediately following each replayed step. Should - take two arguments: iGibsonEnv and IGLogReader - @param end_callback: A callback function that will be called when replay has finished. Should - take two arguments: iGibsonEnv and IGLogReader - @param profile: Whether the replay should be profiled, with profiler output to stdout. - @param image_size: The image size that should be used by the renderer. - @param use_pb_gui: display the interactive pybullet gui (for debugging) + take a single argument, an iGBEHAVIORActivityInstance. + @param end_callback: A callback function that will be called when replay has finished. Should take a single + argument, an iGBEHAVIORActivityInstance. @return if disable_save is True, returns None. Otherwise, returns a boolean indicating if replay was deterministic. """ # HDR files for PBR rendering @@ -132,7 +102,7 @@ def replay_demo( background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg") # VR rendering settings - rendering_setting = MeshRendererSettings( + vr_rendering_settings = MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture, @@ -146,7 +116,7 @@ def replay_demo( ) # Check mode - assert mode in ["headless", "headless_tensor", "vr", "gui_non_interactive"] + assert mode in ["headless", "vr", "simple"] # Initialize settings to save action replay frames vr_settings = VrSettings(config_str=IGLogReader.read_metadata_attr(in_log_path, "/metadata/vr_settings")) @@ -158,14 +128,6 @@ def replay_demo( physics_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/physics_timestep") render_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/render_timestep") filter_objects = IGLogReader.read_metadata_attr(in_log_path, "/metadata/filter_objects") - instance_id = IGLogReader.read_metadata_attr(in_log_path, "/metadata/instance_id") - urdf_file = IGLogReader.read_metadata_attr(in_log_path, "/metadata/urdf_file") - - if urdf_file is None: - urdf_file = "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id) - - if instance_id is None: - instance_id = 0 logged_git_info = IGLogReader.read_metadata_attr(in_log_path, "/metadata/git_info") logged_git_info = parse_str_config(logged_git_info) @@ -189,80 +151,82 @@ def replay_demo( print("Current git info:\n") pp.pprint(git_info[key]) - config = parse_config(config_file) - config["task"] = task - config["task_id"] = task_id - config["scene_id"] = scene - config["instance_id"] = instance_id - config["urdf_file"] = urdf_file - config["image_width"] = image_size[0] - config["image_height"] = image_size[1] - config["online_sampling"] = False - - env = iGibsonEnv( - config_file=config, + # VR system settings + s = Simulator( mode=mode, - action_timestep=render_timestep, physics_timestep=physics_timestep, - rendering_settings=rendering_setting, + render_timestep=render_timestep, + rendering_settings=vr_rendering_settings, vr_settings=vr_settings, - use_pb_gui=use_pb_gui, + image_width=1280, + image_height=720, ) - env.reset() - robot = env.robots[0] + igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) + igbhvr_act_inst.initialize_simulator( + simulator=s, + scene_id=scene, + scene_kwargs={ + "urdf_file": "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id), + }, + load_clutter=True, + online_sampling=False, + ) + vr_agent = 
igbhvr_act_inst.simulator.robots[0] log_reader = IGLogReader(in_log_path, log_status=False) + log_writer = None if not disable_save: timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - if out_log_path is None: - out_log_path = "{}_{}_{}_{}_{}_replay.hdf5".format(task, task_id, scene, instance_id, timestamp) + if out_log_path == None: + out_log_path = "{}_{}_{}_{}_replay.hdf5".format(task, task_id, scene, timestamp) log_writer = IGLogWriter( - env.simulator, + s, log_filepath=out_log_path, - task=env.task, + task=igbhvr_act_inst, store_vr=False, - vr_robot=robot, + vr_robot=vr_agent, profiling_mode=profile, filter_objects=filter_objects, ) log_writer.set_up_data_storage() - try: - for callback in start_callbacks: - callback(env, log_reader) - - task_done = False + for callback in start_callbacks: + callback(igbhvr_act_inst, log_reader) - while log_reader.get_data_left_to_read(): - env.step(log_reader.get_agent_action("vr_robot")) - task_done |= env.task.check_success()[0] + task_done = False + while log_reader.get_data_left_to_read(): - # Set camera each frame - if mode == "vr": - log_reader.set_replay_camera(env.simulator) + igbhvr_act_inst.simulator.step(print_stats=profile) + task_done, _ = igbhvr_act_inst.check_success() - for callback in step_callbacks: - callback(env, log_reader) + # Set camera each frame + if mode == "vr": + log_reader.set_replay_camera(s) - if not disable_save: - log_writer.process_frame() + for callback in step_callbacks: + callback(igbhvr_act_inst, log_reader) - logging.info("Demo was successfully completed: {}".format(task_done)) + # Get relevant VR action data and update VR agent + vr_agent.apply_action(log_reader.get_agent_action("vr_robot")) - demo_statistics = {} - for callback in end_callbacks: - callback(env, log_reader) - finally: - env.close() if not disable_save: - log_writer.end_log_session() + log_writer.process_frame() + + print("Demo was succesfully completed: ", task_done) + + demo_statistics = {} + for callback in end_callbacks: + callback(igbhvr_act_inst, log_reader) + + s.disconnect() is_deterministic = None if not disable_save: + log_writer.end_log_session() is_deterministic = verify_determinism(in_log_path, out_log_path) - logging.info("Demo was deterministic: {}".format(is_deterministic)) + print("Demo was deterministic: ", is_deterministic) demo_statistics = { "deterministic": is_deterministic, @@ -283,5 +247,18 @@ def safe_replay_demo(*args, **kwargs): ), "Replay was not deterministic (or was executed with disable_save=True)." 
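A minimal sketch of driving this replay API from user code; the demo paths are placeholders and the metric callback mirrors the pattern used in behavior_demo_metrics_batch.py:

import bddl

from igibson.examples.behavior.behavior_demo_replay import replay_demo
from igibson.metrics.agent import AgentMetric

bddl.set_backend("iGibson")  # required before replaying, as in main() below

# Replay a previously recorded demo headlessly while collecting agent statistics.
agent_metric = AgentMetric()
demo_statistics = replay_demo(
    "behavior_demo.hdf5",  # placeholder: path to a recorded BEHAVIOR demo log
    out_log_path="behavior_demo_replay.hdf5",  # placeholder output path
    step_callbacks=[agent_metric.step_callback],
    mode="headless",
)
print(demo_statistics["task_done"], demo_statistics["deterministic"])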
+def main(): + args = parse_args() + bddl.set_backend("iGibson") + replay_demo( + args.vr_log_path, + out_log_path=args.vr_replay_log_path, + disable_save=args.disable_save, + frame_save_path=args.frame_save_path, + mode=args.mode, + profile=args.profile, + ) + + if __name__ == "__main__": main() diff --git a/igibson/examples/behavior/behavior_demo_replay_rl.py b/igibson/examples/behavior/behavior_demo_replay_rl.py new file mode 100644 index 000000000..c380721e4 --- /dev/null +++ b/igibson/examples/behavior/behavior_demo_replay_rl.py @@ -0,0 +1,279 @@ +""" +BEHAVIOR RL episodes replay entrypoint +""" + +import argparse +import datetime +import os +import pprint + +import bddl +import h5py +import numpy as np + +import igibson +from igibson.activity.activity_base import iGBEHAVIORActivityInstance +from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings +from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings +from igibson.simulator import Simulator +from igibson.utils.git_utils import project_git_info +from igibson.utils.ig_logging import IGLogReader, IGLogWriter +from igibson.utils.utils import parse_str_config + + +def verify_determinism(in_log_path, out_log_path): + is_deterministic = True + with h5py.File(in_log_path) as original_file, h5py.File(out_log_path) as new_file: + for obj in original_file["physics_data"]: + for attribute in original_file["physics_data"][obj]: + is_close = np.isclose( + original_file["physics_data"][obj][attribute], new_file["physics_data"][obj][attribute] + ).all() + is_deterministic = is_deterministic and is_close + if not is_close: + print("Mismatch for obj {} with mismatched attribute {}".format(obj, attribute)) + return bool(is_deterministic) + + +def parse_args(): + parser = argparse.ArgumentParser(description="Run and collect an ATUS demo") + parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log to replay") + parser.add_argument( + "--vr_replay_log_path", type=str, help="Path (and filename) of file to save replay to (for debugging)" + ) + parser.add_argument( + "--frame_save_path", + type=str, + help="Path to save frames (frame number added automatically, as well as .jpg extension)", + ) + parser.add_argument( + "--disable_save", + action="store_true", + help="Whether to disable saving log of replayed trajectory, used for validation.", + ) + parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.") + parser.add_argument( + "--mode", + type=str, + choices=["headless", "vr", "simple"], + help="Whether to disable replay through VR and use iggui instead.", + ) + return parser.parse_args() + + +def replay_demo( + in_log_path, + out_log_path=None, + disable_save=False, + frame_save_path=None, + verbose=True, + mode="headless", + start_callbacks=[], + step_callbacks=[], + end_callbacks=[], + profile=False, +): + """ + Replay a BEHAVIOR demo. + + Note that this returns, but does not check for determinism. Use safe_replay_demo to assert for determinism + when using in scenarios where determinism is important. + + @param in_log_path: the path of the BEHAVIOR demo log to replay. + @param out_log_path: the path of the new BEHAVIOR demo log to save from the replay. + @param frame_save_path: the path to save frame images to. None to disable frame image saving. + @param mode: which rendering mode ("headless", "simple", "vr"). In simple mode, the demo will be replayed with simple robot view. 
+ @param disable_save: Whether saving the replay as a BEHAVIOR demo log should be disabled. + @param profile: Whether the replay should be profiled, with profiler output to stdout. + @param start_callback: A callback function that will be called immediately before starting to replay steps. Should + take a single argument, an iGBEHAVIORActivityInstance. + @param step_callback: A callback function that will be called immediately following each replayed step. Should + take a single argument, an iGBEHAVIORActivityInstance. + @param end_callback: A callback function that will be called when replay has finished. Should take a single + argument, an iGBEHAVIORActivityInstance. + @return if disable_save is True, returns None. Otherwise, returns a boolean indicating if replay was deterministic. + """ + # HDR files for PBR rendering + hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr") + hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr") + light_modulation_map_filename = os.path.join( + igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png" + ) + background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg") + + # VR rendering settings + vr_rendering_settings = MeshRendererSettings( + optimized=True, + fullscreen=False, + env_texture_filename=hdr_texture, + env_texture_filename2=hdr_texture2, + env_texture_filename3=background_texture, + light_modulation_map_filename=light_modulation_map_filename, + enable_shadow=True, + enable_pbr=True, + msaa=False, + light_dimming_factor=1.0, + ) + + # Check mode + assert mode in ["headless", "vr", "simple"] + + # Initialize settings to save action replay frames + vr_settings = VrSettings(config_str=IGLogReader.read_metadata_attr(in_log_path, "/metadata/vr_settings")) + vr_settings.set_frame_save_path(frame_save_path) + + task = IGLogReader.read_metadata_attr(in_log_path, "/metadata/atus_activity") + task_id = IGLogReader.read_metadata_attr(in_log_path, "/metadata/activity_definition") + scene = IGLogReader.read_metadata_attr(in_log_path, "/metadata/scene_id") + physics_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/physics_timestep") + render_timestep = IGLogReader.read_metadata_attr(in_log_path, "/metadata/render_timestep") + filter_objects = IGLogReader.read_metadata_attr(in_log_path, "/metadata/filter_objects") + + logged_git_info = IGLogReader.read_metadata_attr(in_log_path, "/metadata/git_info") + logged_git_info = parse_str_config(logged_git_info) + git_info = project_git_info() + pp = pprint.PrettyPrinter(indent=4) + + for key in logged_git_info.keys(): + logged_git_info[key].pop("directory", None) + git_info[key].pop("directory", None) + if logged_git_info[key] != git_info[key] and verbose: + print("Warning, difference in git commits for repo: {}. 
This may impact deterministic replay".format(key)) + print("Logged git info:\n") + pp.pprint(logged_git_info[key]) + print("Current git info:\n") + pp.pprint(git_info[key]) + + # VR system settings + s = Simulator( + mode=mode, + physics_timestep=physics_timestep, + render_timestep=render_timestep, + rendering_settings=vr_rendering_settings, + vr_settings=vr_settings, + image_width=1280, + image_height=720, + ) + + igtn_task = iGBEHAVIORActivityInstance(task, task_id) + igtn_task.initialize_simulator( + simulator=s, + scene_id=scene, + scene_kwargs={ + "urdf_file": "{}_neurips_task_{}_{}_0_fixed_furniture".format(scene, task, task_id), + }, + load_clutter=True, + online_sampling=False, + ) + vr_agent = igtn_task.simulator.robots[0] + vr_agent.activate() + igtn_task.reset_scene(snapshot_id=igtn_task.initial_state) + # set the constraints to the current poses + vr_agent.apply_action(np.zeros(28)) + + if not in_log_path: + raise RuntimeError("Must provide a VR log path to run action replay!") + log_reader = IGLogReader(in_log_path, log_status=False) + + log_writer = None + if not disable_save: + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + if out_log_path == None: + out_log_path = "{}_{}_{}_{}_replay.hdf5".format(task, task_id, scene, timestamp) + + log_writer = IGLogWriter( + s, + log_filepath=out_log_path, + task=igtn_task, + store_vr=False, + vr_robot=vr_agent, + profiling_mode=profile, + filter_objects=filter_objects, + ) + log_writer.set_up_data_storage() + + for callback in start_callbacks: + callback(igtn_task, log_reader) + + task_done = False + while log_reader.get_data_left_to_read(): + + action = log_reader.get_agent_action("vr_robot") + # Get relevant VR action data and update VR agent + vr_agent.apply_action(action) + + if not disable_save: + log_writer.process_frame() + + igtn_task.simulator.step(print_stats=profile) + task_done, _ = igtn_task.check_success() + + # Set camera each frame + if mode == "vr": + log_reader.set_replay_camera(s) + + for callback in step_callbacks: + callback(igtn_task, log_reader) + + # Per-step determinism check. Activate if necessary. 
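+        # (When enabled, it compares each logged physics value against the replayed value frame by frame and prints any frame where they are not equal or not close.)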
+ # things_to_compare = [thing for thing in log_writer.name_path_data if thing[0] == "physics_data"] + # for thing in things_to_compare: + # thing_path = "/".join(thing) + # fc = log_reader.frame_counter % log_writer.frames_before_write + # if fc == log_writer.frames_before_write - 1: + # continue + # replayed = log_writer.get_data_for_name_path(thing)[fc] + # original = log_reader.read_value(thing_path) + # if not np.all(replayed == original): + # print("%s not equal in %d" % (thing_path, log_reader.frame_counter)) + # if not np.isclose(replayed, original).all(): + # print("%s not close in %d" % (thing_path, log_reader.frame_counter)) + + print("Demo was succesfully completed: ", task_done) + + demo_statistics = {} + for callback in end_callbacks: + callback(igtn_task, log_reader) + + s.disconnect() + + is_deterministic = None + if not disable_save: + log_writer.end_log_session() + is_deterministic = verify_determinism(in_log_path, out_log_path) + print("Demo was deterministic: ", is_deterministic) + + demo_statistics = { + "deterministic": is_deterministic, + "task": task, + "task_id": int(task_id), + "scene": scene, + "task_done": task_done, + "total_frame_num": log_reader.total_frame_num, + } + return demo_statistics + + +def safe_replay_demo(*args, **kwargs): + """Replays a demo, asserting that it was deterministic.""" + demo_statistics = replay_demo(*args, **kwargs) + assert ( + demo_statistics["deterministic"] == True + ), "Replay was not deterministic (or was executed with disable_save=True)." + + +def main(): + args = parse_args() + bddl.set_backend("iGibson") + replay_demo( + args.vr_log_path, + out_log_path=args.vr_replay_log_path, + disable_save=args.disable_save, + frame_save_path=args.frame_save_path, + mode=args.mode, + profile=args.profile, + ) + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/behavior/behavior_demo_segmentation.py b/igibson/examples/behavior/behavior_demo_segmentation.py new file mode 100644 index 000000000..8c55dac87 --- /dev/null +++ b/igibson/examples/behavior/behavior_demo_segmentation.py @@ -0,0 +1,471 @@ +import argparse +import json +import os +from collections import deque, namedtuple +from enum import Enum + +import bddl +import printree +import pyinstrument + +import igibson +from igibson import object_states +from igibson.activity.activity_base import iGBEHAVIORActivityInstance +from igibson.activity.bddl_backend import ObjectStateBinaryPredicate, ObjectStateUnaryPredicate +from igibson.examples.behavior import behavior_demo_replay +from igibson.object_states import ROOM_STATES, factory +from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState, RelativeObjectState +from igibson.robots.behavior_robot import BRBody + +StateRecord = namedtuple("StateRecord", ["state_type", "objects", "value"]) +StateEntry = namedtuple("StateEntry", ["frame_count", "state_records"]) +Segment = namedtuple("DiffEntry", ["start", "duration", "end", "state_records", "sub_segments"]) + + +class SegmentationObjectSelection(Enum): + ALL_OBJECTS = 1 + TASK_RELEVANT_OBJECTS = 2 + ROBOTS = 3 + + +class SegmentationStateSelection(Enum): + ALL_STATES = 1 + GOAL_CONDITION_RELEVANT_STATES = 2 + + +class SegmentationStateDirection(Enum): + BOTH_DIRECTIONS = 1 + FALSE_TO_TRUE = 2 + TRUE_TO_FALSE = 3 + + +STATE_DIRECTIONS = { + # Note that some of these states already only go False-to-True so they are left as BOTH_DIRECTIONS + # so as not to add filtering work. 
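+    # The direction controls which value changes of a state count as segment events (e.g. FALSE_TO_TRUE keeps only False-to-True flips).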
+ object_states.Burnt: SegmentationStateDirection.BOTH_DIRECTIONS, + object_states.Cooked: SegmentationStateDirection.BOTH_DIRECTIONS, + object_states.Dusty: SegmentationStateDirection.BOTH_DIRECTIONS, + object_states.Frozen: SegmentationStateDirection.BOTH_DIRECTIONS, + object_states.InFOVOfRobot: SegmentationStateDirection.FALSE_TO_TRUE, + object_states.InHandOfRobot: SegmentationStateDirection.FALSE_TO_TRUE, + object_states.InReachOfRobot: SegmentationStateDirection.FALSE_TO_TRUE, + object_states.InSameRoomAsRobot: SegmentationStateDirection.FALSE_TO_TRUE, + object_states.Inside: SegmentationStateDirection.FALSE_TO_TRUE, + object_states.NextTo: SegmentationStateDirection.FALSE_TO_TRUE, + # OnFloor: SegmentationStateDirection.FALSE_TO_TRUE, + object_states.OnTop: SegmentationStateDirection.FALSE_TO_TRUE, + object_states.Open: SegmentationStateDirection.BOTH_DIRECTIONS, + object_states.Sliced: SegmentationStateDirection.BOTH_DIRECTIONS, + object_states.Soaked: SegmentationStateDirection.BOTH_DIRECTIONS, + object_states.Stained: SegmentationStateDirection.BOTH_DIRECTIONS, + object_states.ToggledOn: SegmentationStateDirection.BOTH_DIRECTIONS, + # Touching: SegmentationStateDirection.BOTH_DIRECTIONS, + object_states.Under: SegmentationStateDirection.FALSE_TO_TRUE, +} +STATE_DIRECTIONS.update({state: SegmentationStateDirection.FALSE_TO_TRUE for state in ROOM_STATES}) + +ALLOWED_SUB_SEGMENTS_BY_STATE = { + object_states.Burnt: {object_states.OnTop, object_states.ToggledOn, object_states.Open, object_states.Inside}, + object_states.Cooked: {object_states.OnTop, object_states.ToggledOn, object_states.Open, object_states.Inside}, + object_states.Dusty: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot}, + object_states.Frozen: { + object_states.InReachOfRobot, + object_states.OnTop, + object_states.ToggledOn, + object_states.Open, + object_states.Inside, + }, + object_states.InFOVOfRobot: {}, + object_states.InHandOfRobot: {}, + object_states.InReachOfRobot: {}, + object_states.InSameRoomAsRobot: {}, + object_states.Inside: { + object_states.Open, + object_states.InSameRoomAsRobot, + object_states.InReachOfRobot, + object_states.InHandOfRobot, + }, + object_states.NextTo: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot}, + # OnFloor: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot}, + object_states.OnTop: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot}, + object_states.Open: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot}, + object_states.Sliced: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot}, + object_states.Soaked: { + object_states.ToggledOn, + object_states.InSameRoomAsRobot, + object_states.InReachOfRobot, + object_states.InHandOfRobot, + }, + object_states.Stained: { + object_states.Soaked, + object_states.InSameRoomAsRobot, + object_states.InReachOfRobot, + object_states.InHandOfRobot, + }, + object_states.ToggledOn: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot}, + # Touching: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot}, + object_states.Under: {object_states.InSameRoomAsRobot, object_states.InReachOfRobot, object_states.InHandOfRobot}, +} + + +def process_states(objects, state_types): + predicate_states = set() + + for obj in objects: + for state_type in 
state_types: + if state_type not in obj.states: + continue + + assert issubclass(state_type, BooleanState) + state = obj.states[state_type] + if isinstance(state, AbsoluteObjectState): + # Add only one instance of absolute state + try: + value = bool(state.get_value()) + record = StateRecord(state_type, (obj,), value) + predicate_states.add(record) + except ValueError: + pass + elif isinstance(state, RelativeObjectState): + # Add one instance per state pair + for other in objects: + try: + value = state.get_value(other) + record = StateRecord(state_type, (obj, other), value) + predicate_states.add(record) + except ValueError: + pass + else: + raise ValueError("Unusable state for segmentation.") + + return predicate_states + + +def _get_goal_condition_states(igbhvr_act_inst: iGBEHAVIORActivityInstance): + state_types = set() + + q = deque() + q.extend(igbhvr_act_inst.goal_conditions) + + while q: + pred = q.popleft() + if isinstance(pred, ObjectStateUnaryPredicate) or isinstance(pred, ObjectStateBinaryPredicate): + state_types.add(pred.STATE_CLASS) + + q.extend(pred.children) + + return state_types + + +class DemoSegmentationProcessor(object): + def __init__( + self, + state_types=None, + object_selection=SegmentationObjectSelection.TASK_RELEVANT_OBJECTS, + label_by_instance=False, + hierarchical=False, + diff_initial=False, + state_directions=STATE_DIRECTIONS, + profiler=None, + ): + self.state_history = [] + self.last_state = None + + self.state_types_option = state_types + self.state_types = None # To be populated in initialize(). + self.state_directions = state_directions + self.object_selection = object_selection + self.label_by_instance = label_by_instance + + self.hierarchical = hierarchical + self.all_state_types = None + + if diff_initial: + self.state_history.append(StateEntry(0, set())) + self.last_state = set() + + self.profiler = profiler + + def start_callback(self, igbhvr_act_inst, _): + self.all_state_types = [ + state + for state in factory.get_all_states() + if ( + issubclass(state, BooleanState) + and (issubclass(state, AbsoluteObjectState) or issubclass(state, RelativeObjectState)) + ) + ] + + if isinstance(self.state_types_option, list) or isinstance(self.state_types_option, set): + self.state_types = self.state_types_option + elif self.state_types_option == SegmentationStateSelection.ALL_STATES: + self.state_types = self.all_state_types + elif self.state_types_option == SegmentationStateSelection.GOAL_CONDITION_RELEVANT_STATES: + self.state_types = _get_goal_condition_states(igbhvr_act_inst) + else: + raise ValueError("Unknown segmentation state selection.") + + def step_callback(self, igbhvr_act_inst, _): + if self.profiler: + self.profiler.start() + + if self.object_selection == SegmentationObjectSelection.TASK_RELEVANT_OBJECTS: + objects = [obj for obj in igbhvr_act_inst.object_scope.values() if not isinstance(obj, BRBody)] + elif self.object_selection == SegmentationObjectSelection.ROBOTS: + objects = [obj for obj in igbhvr_act_inst.object_scope.values() if isinstance(obj, BRBody)] + elif self.object_selection == SegmentationObjectSelection.ALL_OBJECTS: + objects = igbhvr_act_inst.simulator.scene.get_objects() + else: + raise ValueError("Incorrect SegmentationObjectSelection %r" % self.object_selection) + + # Get the processed state. 
+ state_types_to_use = self.state_types if not self.hierarchical else self.all_state_types + processed_state = process_states(objects, state_types_to_use) + if self.last_state is None or (processed_state - self.last_state): + self.state_history.append(StateEntry(igbhvr_act_inst.simulator.frame_count, processed_state)) + + self.last_state = processed_state + + if self.profiler: + self.profiler.stop() + + def obj2str(self, obj): + return obj.name if self.label_by_instance else obj.category + + def _hierarchical_segments(self, state_entries, state_types): + if not state_types: + return [] + + segments = [] + before_idx = 0 + after_idx = 1 + + # Keep iterating until we reach the end of our state entries. + while after_idx < len(state_entries): + # Get the state entries at these keys. + before = state_entries[before_idx] + after = state_entries[after_idx] + + # Check if there is a valid diff at this range. + diffs = self.filter_diffs(after.state_records - before.state_records, state_types) + if diffs is not None: + # If there is a diff, prepare to do sub-segmentation on the segment. + sub_segment_states = set() + if self.hierarchical: + for state_record in diffs: + corresponding_sub_states = ALLOWED_SUB_SEGMENTS_BY_STATE[state_record.state_type] + sub_segment_states.update(corresponding_sub_states) + + sub_segments = self._hierarchical_segments( + state_entries[before_idx : after_idx + 1], sub_segment_states + ) + segments.append( + Segment( + before.frame_count, + after.frame_count - before.frame_count, + after.frame_count, + diffs, + sub_segments, + ) + ) + + # Continue segmentation by moving the before_idx to start here. + before_idx = after_idx + + # Increase the range of elements we're looking at by one. + after_idx += 1 + + return segments + + def get_segments(self): + segments = self._hierarchical_segments(self.state_history, self.state_types) + return Segment(segments[0].start, segments[-1].end - segments[0].start, segments[-1].end, [], segments) + + def filter_diffs(self, state_records, state_types): + """Filter the segments so that only objects in the given state directions are monitored.""" + new_records = set() + + # Go through the records in the segment. + for state_record in state_records: + # Check if the state type is on our list + if state_record.state_type not in state_types: + continue + + # Check if any object in the record is on our list. + mode = self.state_directions[state_record.state_type] + accept = True + if mode == SegmentationStateDirection.FALSE_TO_TRUE: + accept = state_record.value + elif mode == SegmentationStateDirection.TRUE_TO_FALSE: + accept = not state_record.value + + # If an object in our list is part of the record, keep the record. + if accept: + new_records.add(state_record) + + # If we haven't kept any of this segment's records, drop the segment. 
+ if not new_records: + return None + + return new_records + + def _serialize_segment(self, segment): + stringified_entries = [ + { + "name": state_record.state_type.__name__, + "objects": [self.obj2str(obj) for obj in state_record.objects], + "value": state_record.value, + } + for state_record in segment.state_records + ] + + return { + "start": segment.start, + "end": segment.end, + "duration": segment.duration, + "state_records": stringified_entries, + "sub_segments": [self._serialize_segment(sub_segment) for sub_segment in segment.sub_segments], + } + + def _segment_to_dict_tree(self, segment, output_dict): + stringified_entries = [ + ( + state_record.state_type.__name__, + ", ".join(obj.category for obj in state_record.objects), + state_record.value, + ) + for state_record in segment.state_records + ] + + entry_strs = ["%s(%r) = %r" % entry for entry in stringified_entries] + key = "%d-%d: %s" % (segment.start, segment.end, ", ".join(entry_strs)) + sub_segments = {} + for sub in segment.sub_segments: + self._segment_to_dict_tree(sub, sub_segments) + output_dict[key] = sub_segments + + def serialize_segments(self): + # Make the root call to recursive function. + return self._serialize_segment(self.get_segments()) + + def __str__(self): + out = "" + out += "---------------------------------------------------\n" + out += "Segmentation of %s\n" % self.object_selection.name + out += "Considered states: %s\n" % ", ".join(x.__name__ for x in self.state_types) + out += "---------------------------------------------------\n" + output = {} + self._segment_to_dict_tree(self.get_segments(), output) + out += printree.ftree(output) + "\n" + out += "---------------------------------------------------\n" + return out + + +def parse_args(): + parser = argparse.ArgumentParser(description="Run segmentation on an ATUS demo.") + parser.add_argument( + "--log_path", type=str, help="Path (and filename) of log to replay. If empty, test demo will be used." + ) + parser.add_argument( + "--out_dir", type=str, help="Directory to store results in. If empty, test directory will be used." + ) + parser.add_argument( + "--profile", + action="store_true", + help="Whether to profile the segmentation, outputting a profile HTML in the out path.", + ) + return parser.parse_args() + + +def get_default_segmentation_processors(profiler=None): + # This applies a "flat" segmentation (e.g. not hierarchical) using only the states supported by our magic motion + # primitives. + flat_states = [ + object_states.Open, + object_states.OnTop, + object_states.Inside, + object_states.InHandOfRobot, + object_states.InReachOfRobot, + ] + flat_object_segmentation = DemoSegmentationProcessor( + flat_states, SegmentationObjectSelection.TASK_RELEVANT_OBJECTS, label_by_instance=True, profiler=profiler + ) + + # This applies a hierarchical segmentation based on goal condition states. It's WIP and currently unused. + goal_segmentation = DemoSegmentationProcessor( + SegmentationStateSelection.GOAL_CONDITION_RELEVANT_STATES, + SegmentationObjectSelection.TASK_RELEVANT_OBJECTS, + hierarchical=True, + label_by_instance=True, + profiler=profiler, + ) + + # This applies a flat segmentation that allows us to see what room the agent is in during which frames. 
+ room_presence_segmentation = DemoSegmentationProcessor( + ROOM_STATES, SegmentationObjectSelection.ROBOTS, diff_initial=True, profiler=profiler + ) + + return { + # "goal": goal_segmentation, + "flat": flat_object_segmentation, + "room": room_presence_segmentation, + } + + +def main(): + bddl.set_backend("iGibson") + args = parse_args() + + # Select the demo to apply segmentation on. + demo_file = os.path.join(igibson.ig_dataset_path, "tests", "cleaning_windows_0_Rs_int_2021-05-23_23-11-46.hdf5") + if args.log_path: + demo_file = args.log_path + + # Select the output directory. + out_dir = os.path.join(igibson.ig_dataset_path, "tests", "segmentation_results") + if args.out_dir: + out_dir = args.out_dir + + # Create output directory if needed. + if not os.path.exists(out_dir): + os.mkdir(out_dir) + + # Set up the profiler + profiler = None + if args.profile: + profiler = pyinstrument.Profiler() + + # Create default segmentation processors. + segmentation_processors = get_default_segmentation_processors(profiler) + + # Run the segmentations. + behavior_demo_replay.safe_replay_demo( + demo_file, + start_callbacks=[sp.start_callback for sp in segmentation_processors.values()], + step_callbacks=[sp.step_callback for sp in segmentation_processors.values()], + ) + + demo_basename = os.path.splitext(os.path.basename(demo_file))[0] + for segmentation_name, segmentation_processor in segmentation_processors.items(): + json_file = "%s_%s.json" % (demo_basename, segmentation_name) + json_fullpath = os.path.join(out_dir, json_file) + with open(json_fullpath, "w") as f: + json.dump(segmentation_processor.serialize_segments(), f) + + # Print the segmentations. + combined_output = "" + for segmentation_processor in segmentation_processors.values(): + combined_output += str(segmentation_processor) + "\n" + + print(combined_output) + + # Save profiling information. + if args.profile: + html = profiler.output_html() + html_path = os.path.join(out_dir, "segmentation_profile.html") + with open(html_path, "w") as f: + f.write(html) + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/behavior/behavior_demo_segmentation_batch.py b/igibson/examples/behavior/behavior_demo_segmentation_batch.py new file mode 100644 index 000000000..eda76d778 --- /dev/null +++ b/igibson/examples/behavior/behavior_demo_segmentation_batch.py @@ -0,0 +1,39 @@ +import argparse + +from igibson.examples.behavior.behavior_demo_batch import behavior_demo_batch +from igibson.examples.behavior.behavior_demo_segmentation import get_default_segmentation_processors + + +def parse_args(): + parser = argparse.ArgumentParser(description="Collect metrics from BEHAVIOR demos in manifest.") + parser.add_argument("demo_root", type=str, help="Directory containing demos listed in the manifest.") + parser.add_argument("log_manifest", type=str, help="Plain text file consisting of list of demos to replay.") + parser.add_argument("out_dir", type=str, help="Directory to store results in.") + return parser.parse_args() + + +def main(): + args = parse_args() + + def get_segmentation_callbacks(): + # Create default segmentation processors. + segmentation_processors = get_default_segmentation_processors() + + # Create a data callback that unifies the results from the + # segmentation processors. + def data_callback(): + return {"segmentations": {name: sp.serialize_segments() for name, sp in segmentation_processors.items()}} + + # Return all of the callbacks for a particular demo. 
+        return (
+            [sp.start_callback for sp in segmentation_processors.values()],
+            [sp.step_callback for sp in segmentation_processors.values()],
+            [],
+            [data_callback],
+        )
+
+    behavior_demo_batch(args.demo_root, args.log_manifest, args.out_dir, get_segmentation_callbacks)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/igibson/examples/behavior/behavior_env_metrics.py b/igibson/examples/behavior/behavior_env_metrics.py
new file mode 100644
index 000000000..307f6f818
--- /dev/null
+++ b/igibson/examples/behavior/behavior_env_metrics.py
@@ -0,0 +1,97 @@
+import argparse
+import json
+import os
+import parser
+
+import bddl
+
+import igibson
+from igibson.envs.behavior_env import BehaviorEnv
+from igibson.metrics.agent import BehaviorRobotMetric, FetchRobotMetric
+from igibson.metrics.disarrangement import KinematicDisarrangement, LogicalDisarrangement
+from igibson.metrics.task import TaskMetric
+
+
+def get_metrics_callbacks(config):
+    metrics = [
+        KinematicDisarrangement(),
+        LogicalDisarrangement(),
+        TaskMetric(),
+    ]
+
+    robot_type = config["robot"]
+    if robot_type == "FetchGripper":
+        metrics.append(FetchRobotMetric())
+    elif robot_type == "BehaviorRobot":
+        metrics.append(BehaviorRobotMetric())
+    else:
+        raise Exception("Metrics only implemented for FetchGripper and BehaviorRobot")
+
+    return (
+        [metric.start_callback for metric in metrics],
+        [metric.step_callback for metric in metrics],
+        [metric.end_callback for metric in metrics],
+        [metric.gather_results for metric in metrics],
+    )
+
+
+if __name__ == "__main__":
+
+    bddl.set_backend("iGibson")
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--config",
+        "-c",
+        default=os.path.join(igibson.root_path, "examples", "configs", "behavior_onboard_sensing_fetch.yaml"),
+        help="which config file to use [default: use yaml files in examples/configs]",
+    )
+    parser.add_argument(
+        "--mode",
+        "-m",
+        choices=["headless", "simple", "gui", "iggui", "pbgui"],
+        default="simple",
+        help="which mode for simulation (default: simple)",
+    )
+    args = parser.parse_args()
+
+    env = BehaviorEnv(
+        config_file=args.config,
+        mode=args.mode,
+        action_timestep=1.0 / 10.0,
+        physics_timestep=1.0 / 120.0,
+    )
+
+    start_callbacks, step_callbacks, end_callbacks, data_callbacks = get_metrics_callbacks(env.config)
+
+    per_episode_metrics = {}
+    for callback in start_callbacks:
+        callback(env.task, None)
+
+    for episode in range(10):
+        env.reset()
+        for i in range(1000):
+            action = env.action_space.sample()
+            state, reward, done, _ = env.step(action)
+            for callback in step_callbacks:
+                callback(env.task, None)
+            if done:
+                break
+
+        for callback in end_callbacks:
+            callback(env.task, None)
+
+        metrics_summary = {}
+
+        for callback in data_callbacks:
+            metrics_summary.update(callback())
+
+        per_episode_metrics[episode] = metrics_summary
+
+    log_path = "my_igibson_run.json"
+
+    with open(log_path, "w") as file:
+        json.dump(per_episode_metrics, file)
+
+    env.close()
diff --git a/igibson/examples/behavior/behavior_generate_batch_manifest.py b/igibson/examples/behavior/behavior_generate_batch_manifest.py
new file mode 100644
index 000000000..f28a750fd
--- /dev/null
+++ b/igibson/examples/behavior/behavior_generate_batch_manifest.py
@@ -0,0 +1,32 @@
+import argparse
+import glob
+import os
+
+import numpy as np
+import pandas as pd
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Script to generate manifest for batch replay demos")
+    parser.add_argument(
+        "--demo_directory", type=str, required=True, help="Plain text file
consisting of list of demos to replay" + ) + parser.add_argument( + "--split", type=int, default=0, help="Number of times to split the manifest for distributing replay" + ) + return parser.parse_args() + + +def main(): + args = parse_args() + demos = glob.glob(os.path.join(args.demo_directory, "*.hdf5")) + filtered_demos = [demo for demo in demos if "replay" not in demo] + series = {"demos": filtered_demos} + pd.DataFrame(series).to_csv("manifest.csv") + if args.split > 1: + for idx, split in enumerate(np.array_split(filtered_demos, args.split)): + pd.DataFrame({"demos": split}).to_csv("manifest_{}.csv".format(idx)) + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/behavior/behavior_robot_related_states_demo.py b/igibson/examples/behavior/behavior_robot_related_states_demo.py new file mode 100644 index 000000000..74990e723 --- /dev/null +++ b/igibson/examples/behavior/behavior_robot_related_states_demo.py @@ -0,0 +1,51 @@ +import os + +import bddl + +import igibson +from igibson import object_states +from igibson.examples.behavior import behavior_demo_replay + +bddl.set_backend("iGibson") + + +def robot_states_callback(igbhvr_act_inst, _): + window1 = (igbhvr_act_inst.object_scope["window.n.01_1"], "kitchen") + window2 = (igbhvr_act_inst.object_scope["window.n.01_2"], "living room") + windows = [window1, window2] + + for window, roomname in windows: + print( + "%s window is inFOV: %r, inSameRoom: %r, inReach: %r" + % ( + roomname, + window.states[object_states.InFOVOfRobot].get_value(), + window.states[object_states.InSameRoomAsRobot].get_value(), + window.states[object_states.InReachOfRobot].get_value(), + ) + ) + + rag = igbhvr_act_inst.object_scope["rag.n.01_1"] + print("Rag is in hand: %r" % rag.states[object_states.InHandOfRobot].get_value()) + + agent = igbhvr_act_inst.object_scope["agent.n.01_1"] + print( + "Agent is in kitchen: %r, living room: %r, bedroom: %r." 
+ % ( + agent.states[object_states.IsInKitchen].get_value(), + agent.states[object_states.IsInLivingRoom].get_value(), + agent.states[object_states.IsInBedroom].get_value(), + ) + ) + + +def main(): + DEMO_FILE = os.path.join(igibson.ig_dataset_path, "tests", "cleaning_windows_0_Rs_int_2021-05-23_23-11-46.hdf5") + + behavior_demo_replay.replay_demo( + DEMO_FILE, disable_save=True, step_callbacks=[robot_states_callback], mode="headless" + ) + + +if __name__ == "__main__": + main() diff --git a/igibson/configs/behavior_full_observability.yaml b/igibson/examples/configs/behavior_full_observability.yaml similarity index 51% rename from igibson/configs/behavior_full_observability.yaml rename to igibson/examples/configs/behavior_full_observability.yaml index 5b942cada..0fa6d762a 100644 --- a/igibson/configs/behavior_full_observability.yaml +++ b/igibson/examples/configs/behavior_full_observability.yaml @@ -1,31 +1,44 @@ # scene -scene: igibson scene_id: Beechwood_1_int clutter: false build_graph: true load_texture: true pybullet_load_texture: true -should_open_all_doors: false +should_open_all_doors: true # domain randomization texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: BehaviorRobot +robot: BehaviorRobot # task task: cleaning_cupboards task_id: 0 -instance_id: 0 online_sampling: false +target_dist_min: 1.0 +target_dist_max: 10.0 +goal_format: polar +task_obs_dim: 4 + +# reward +reward_type: l2 +success_reward: 10.0 +potential_reward_weight: 1.0 +collision_reward_weight: -0.1 # discount factor discount_factor: 0.99 # termination condition +dist_tol: 0.36 # body width max_step: 500 +max_collisions_allowed: 500 + +# misc config +initial_pos_z_offset: 0.1 +collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links # sensor spec output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs] @@ -41,3 +54,12 @@ depth_high: 10.0 # sensor noise depth_noise_rate: 0.0 scan_noise_rate: 0.0 + +# visual objects +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false + +# speed limit +hand_threshold: 0.4 +hand_speed: 0.3 +body_speed: 0.3 diff --git a/igibson/examples/configs/behavior_full_observability_fetch.yaml b/igibson/examples/configs/behavior_full_observability_fetch.yaml new file mode 100644 index 000000000..6dd8d127f --- /dev/null +++ b/igibson/examples/configs/behavior_full_observability_fetch.yaml @@ -0,0 +1,80 @@ +# scene +scene_id: Beechwood_1_int +clutter: false +build_graph: true +load_texture: true +pybullet_load_texture: true +should_open_all_doors: true + +# domain randomization +texture_randomization_freq: null +object_randomization_freq: null + +# robot +robot: FetchGripper +use_ag: true +ag_strict_mode: true +default_arm_pose: diagonal30 +trunk_offset: 0.085 +controller: + mode: pose_delta_ori # options are {pose_delta_ori, position_fixed_ori, position_compliant_ori} + kv_vel: -2.0 + input_max: [1, 1, 1, 1, 1, 1] + input_min: [-1, -1, -1, -1, -1, -1] + output_max: [0.2, 0.2, 0.2, 0.5, 0.5, 0.5] + output_min: [-0.2, -0.2, -0.2, -0.5, -0.5, -0.5] + eef_always_in_frame: false # if true, will add hard workspace left-right constraints to prevent EEF from moving off-frame in horizontal directions + neutral_xy: [0.25, 0] # (x,y) relative pos values taken in robot base frame from which radius limits will be computed + radius_limit: 0.5 # x-y reaching limit + height_limits: [0.2, 1.5] # min, max height limits + +# task +task: cleaning_cupboards +task_id: 0 +online_sampling: false 
+target_dist_min: 1.0 +target_dist_max: 10.0 +goal_format: polar +task_obs_dim: 4 + +# reward +reward_type: l2 +success_reward: 10.0 +potential_reward_weight: 1.0 +collision_reward_weight: -0.1 + +# discount factor +discount_factor: 0.99 + +# termination condition +dist_tol: 0.36 # body width +max_step: 500 +max_collisions_allowed: 500 + +# misc config +initial_pos_z_offset: 0.1 +collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links + +# sensor spec +output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs] +# image +fisheye: false +image_width: 512 +image_height: 512 +vertical_fov: 120 +# depth +depth_low: 0.0 +depth_high: 10.0 + +# sensor noise +depth_noise_rate: 0.0 +scan_noise_rate: 0.0 + +# visual objects +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false + +# speed limit +hand_threshold: 0.4 +hand_speed: 0.3 +body_speed: 0.3 diff --git a/igibson/configs/behavior_onboard_sensing.yaml b/igibson/examples/configs/behavior_onboard_sensing.yaml similarity index 51% rename from igibson/configs/behavior_onboard_sensing.yaml rename to igibson/examples/configs/behavior_onboard_sensing.yaml index 554c3fea2..90a6cfd95 100644 --- a/igibson/configs/behavior_onboard_sensing.yaml +++ b/igibson/examples/configs/behavior_onboard_sensing.yaml @@ -1,31 +1,44 @@ # scene -scene: igibson scene_id: Beechwood_1_int clutter: false build_graph: true load_texture: true pybullet_load_texture: true -should_open_all_doors: false +should_open_all_doors: true # domain randomization texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: BehaviorRobot +robot: BehaviorRobot # task task: cleaning_cupboards task_id: 0 -instance_id: 0 online_sampling: false +target_dist_min: 1.0 +target_dist_max: 10.0 +goal_format: polar +task_obs_dim: 4 + +# reward +reward_type: l2 +success_reward: 10.0 +potential_reward_weight: 1.0 +collision_reward_weight: -0.1 # discount factor discount_factor: 0.99 # termination condition +dist_tol: 0.36 # body width max_step: 500 +max_collisions_allowed: 500 + +# misc config +initial_pos_z_offset: 0.1 +collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links # sensor spec output: [proprioception, rgb, highlight, depth, seg, ins_seg] @@ -41,3 +54,12 @@ depth_high: 10.0 # sensor noise depth_noise_rate: 0.0 scan_noise_rate: 0.0 + +# visual objects +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false + +# speed limit +hand_threshold: 0.4 +hand_speed: 0.3 +body_speed: 0.3 diff --git a/igibson/examples/configs/behavior_onboard_sensing_fetch.yaml b/igibson/examples/configs/behavior_onboard_sensing_fetch.yaml new file mode 100644 index 000000000..589efc9b7 --- /dev/null +++ b/igibson/examples/configs/behavior_onboard_sensing_fetch.yaml @@ -0,0 +1,80 @@ +# scene +scene_id: Beechwood_1_int +clutter: false +build_graph: true +load_texture: true +pybullet_load_texture: true +should_open_all_doors: true + +# domain randomization +texture_randomization_freq: null +object_randomization_freq: null + +# robot +robot: FetchGripper +use_ag: true +ag_strict_mode: true +default_arm_pose: diagonal30 +trunk_offset: 0.085 +controller: + mode: pose_delta_ori # options are {pose_delta_ori, position_fixed_ori, position_compliant_ori} + kv_vel: -2.0 + input_max: [1, 1, 1, 1, 1, 1] + input_min: [-1, -1, -1, -1, -1, -1] + output_max: [0.2, 0.2, 0.2, 0.5, 0.5, 0.5] + output_min: [-0.2, -0.2, -0.2, -0.5, -0.5, -0.5] + eef_always_in_frame: false # if true, 
will add hard workspace left-right constraints to prevent EEF from moving off-frame in horizontal directions + neutral_xy: [0.25, 0] # (x,y) relative pos values taken in robot base frame from which radius limits will be computed + radius_limit: 0.5 # x-y reaching limit + height_limits: [0.2, 1.5] # min, max height limits + +# task +task: cleaning_cupboards +task_id: 0 +online_sampling: false +target_dist_min: 1.0 +target_dist_max: 10.0 +goal_format: polar +task_obs_dim: 4 + +# reward +reward_type: l2 +success_reward: 10.0 +potential_reward_weight: 1.0 +collision_reward_weight: -0.1 + +# discount factor +discount_factor: 0.99 + +# termination condition +dist_tol: 0.36 # body width +max_step: 500 +max_collisions_allowed: 500 + +# misc config +initial_pos_z_offset: 0.1 +collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links + +# sensor spec +output: [proprioception, rgb, highlight, depth, seg, ins_seg] +# image +fisheye: false +image_width: 512 +image_height: 512 +vertical_fov: 120 +# depth +depth_low: 0.0 +depth_high: 10.0 + +# sensor noise +depth_noise_rate: 0.0 +scan_noise_rate: 0.0 + +# visual objects +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false + +# speed limit +hand_threshold: 0.4 +hand_speed: 0.3 +body_speed: 0.3 diff --git a/igibson/configs/fetch_motion_planning.yaml b/igibson/examples/configs/fetch_motion_planning.yaml similarity index 71% rename from igibson/configs/fetch_motion_planning.yaml rename to igibson/examples/configs/fetch_motion_planning.yaml index e2bbfb068..fe5098b47 100644 --- a/igibson/configs/fetch_motion_planning.yaml +++ b/igibson/examples/configs/fetch_motion_planning.yaml @@ -14,30 +14,10 @@ texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: Fetch - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: true - rendering_params: null - assisted_grasping_mode: null - rigid_trunk: false - default_trunk_offset: 0.365 - default_arm_pose: diagonal30 - controller_config: - base: - name: DifferentialDriveController - arm: - name: InverseKinematicsController - kv: 2.0 - gripper: - name: ParallelJawGripperController - mode: binary - camera: - name: JointController - use_delta_commands: False +robot: Fetch +wheel_velocity: 0.8 +torso_lift_velocity: 0.8 +arm_velocity: 0.8 # task task: room_rearrangement @@ -97,6 +77,5 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true -visible_path: false - +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/configs/behavior_segmentation_replay.yaml b/igibson/examples/configs/fetch_reaching.yaml similarity index 77% rename from igibson/configs/behavior_segmentation_replay.yaml rename to igibson/examples/configs/fetch_reaching.yaml index efe83ed54..259397209 100644 --- a/igibson/configs/behavior_segmentation_replay.yaml +++ b/igibson/examples/configs/fetch_reaching.yaml @@ -1,27 +1,26 @@ # scene scene: igibson -scene_id: REPLACE_ME -clutter: false +scene_id: Rs_int build_graph: true load_texture: true pybullet_load_texture: true +trav_map_type: no_obj +trav_map_resolution: 0.1 +trav_map_erosion: 3 should_open_all_doors: true -# Ignore task, use EmptyScene instead -debug: False - # domain randomization texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: BehaviorRobot +robot: Fetch +wheel_velocity: 0.8 +torso_lift_velocity: 0.8 +arm_velocity: 0.8 # task -task: 
REPLACE_ME -task_id: REPLACE_ME -online_sampling: False +task: reaching_random target_dist_min: 1.0 target_dist_max: 10.0 goal_format: polar @@ -38,7 +37,7 @@ discount_factor: 0.99 # termination condition dist_tol: 0.36 # body width -max_step: 100 +max_step: 500 max_collisions_allowed: 500 # misc config @@ -46,13 +45,13 @@ initial_pos_z_offset: 0.1 collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links # sensor spec -output: [proprioception, task_obs] +output: [task_obs, rgb, depth, scan] # image # Primesense Carmine 1.09 short-range RGBD sensor # http://xtionprolive.com/primesense-carmine-1.09 fisheye: false -image_width: 512 -image_height: 512 +image_width: 128 +image_height: 128 vertical_fov: 90 # depth depth_low: 0.35 @@ -73,10 +72,5 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true - - -# speed limit -hand_threshold: 0.4 -hand_speed: 0.3 -body_speed: 0.3 \ No newline at end of file +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/configs/fetch_rearrangement.yaml b/igibson/examples/configs/fetch_room_rearrangement.yaml similarity index 71% rename from igibson/configs/fetch_rearrangement.yaml rename to igibson/examples/configs/fetch_room_rearrangement.yaml index c0313fd05..b33408cd4 100644 --- a/igibson/configs/fetch_rearrangement.yaml +++ b/igibson/examples/configs/fetch_room_rearrangement.yaml @@ -14,30 +14,10 @@ texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: Fetch - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: true - rendering_params: null - assisted_grasping_mode: null - rigid_trunk: false - default_trunk_offset: 0.365 - default_arm_pose: diagonal30 - controller_config: - base: - name: DifferentialDriveController - arm: - name: InverseKinematicsController - kv: 2.0 - gripper: - name: ParallelJawGripperController - mode: binary - camera: - name: JointController - use_delta_commands: False +robot: Fetch +wheel_velocity: 0.8 +torso_lift_velocity: 0.8 +arm_velocity: 0.8 # task task: room_rearrangement @@ -97,6 +77,5 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true -visible_path: false - +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/configs/jr2_reaching.yaml b/igibson/examples/configs/jr_reaching.yaml similarity index 74% rename from igibson/configs/jr2_reaching.yaml rename to igibson/examples/configs/jr_reaching.yaml index 3a6beb73f..a9c1b1bf0 100644 --- a/igibson/configs/jr2_reaching.yaml +++ b/igibson/examples/configs/jr_reaching.yaml @@ -14,22 +14,10 @@ texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: JR2 - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: false - rendering_params: null - assisted_grasping_mode: null - controller_config: - base: - name: DifferentialDriveController - arm: - name: InverseKinematicsController - gripper: - name: NullGripperController +robot: JR2_Kinova +is_discrete: false +wheel_velocity: 0.3 +arm_velocity: 1.0 # task task: reaching_random @@ -73,5 +61,5 @@ depth_high: 10.0 depth_noise_rate: 0.0 # visual objects -visible_target: true - +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/configs/locobot_nav.yaml b/igibson/examples/configs/locobot_point_nav.yaml similarity index 76% rename from 
igibson/configs/locobot_nav.yaml rename to igibson/examples/configs/locobot_point_nav.yaml index bf9d6eb32..5e0001b2a 100644 --- a/igibson/configs/locobot_nav.yaml +++ b/igibson/examples/configs/locobot_point_nav.yaml @@ -14,20 +14,10 @@ texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: Locobot - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: false - rendering_params: null - controller_config: - base: - name: DifferentialDriveController - command_output_limits: - - [-0.5, -1.57] - - [0.5, 1.57] +robot: Locobot +is_discrete: false +linear_velocity: 0.5 +angular_velocity: 1.5707963267948966 # task task: point_nav_random @@ -71,6 +61,5 @@ depth_high: 10.0 depth_noise_rate: 0.0 # visual objects -visible_target: true -visible_path: false - +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/examples/configs/njk_re-shelving_library_books_full_obs.yaml b/igibson/examples/configs/njk_re-shelving_library_books_full_obs.yaml new file mode 100644 index 000000000..00e824a0c --- /dev/null +++ b/igibson/examples/configs/njk_re-shelving_library_books_full_obs.yaml @@ -0,0 +1,66 @@ +# scene +scene_id: Pomaria_1_int +clutter: false +build_graph: true +load_texture: true +pybullet_load_texture: true +should_open_all_doors: true + +# domain randomization +texture_randomization_freq: null +object_randomization_freq: null + +# robot +robot: BehaviorRobot + +# task +# task: cleaning_cupboards +task: re-shelving_library_books +task_id: 0 +online_sampling: false +target_dist_min: 1.0 +target_dist_max: 10.0 +goal_format: polar +task_obs_dim: 4 + +# reward +reward_type: l2 +success_reward: 10.0 +potential_reward_weight: 1.0 +collision_reward_weight: -0.1 + +# discount factor +discount_factor: 0.99 + +# termination condition +dist_tol: 0.36 # body width +max_step: 500 +max_collisions_allowed: 500 + +# misc config +initial_pos_z_offset: 0.1 +collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links + +# sensor spec +output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs] +# image +fisheye: false +image_width: 1280 +image_height: 1280 +vertical_fov: 120 +# depth +depth_low: 0.0 +depth_high: 10.0 + +# sensor noise +depth_noise_rate: 0.0 +scan_noise_rate: 0.0 + +# visual objects +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false + +# speed limit +hand_threshold: 0.4 +hand_speed: 0.3 +body_speed: 0.3 diff --git a/igibson/examples/configs/njk_sorting_books_full_obs.yaml b/igibson/examples/configs/njk_sorting_books_full_obs.yaml new file mode 100644 index 000000000..19089a3a4 --- /dev/null +++ b/igibson/examples/configs/njk_sorting_books_full_obs.yaml @@ -0,0 +1,66 @@ +# scene +scene_id: Pomaria_1_int +clutter: false +build_graph: true +load_texture: true +pybullet_load_texture: true +should_open_all_doors: true + +# domain randomization +texture_randomization_freq: null +object_randomization_freq: null + +# robot +robot: BehaviorRobot + +# task +# task: cleaning_cupboards +task: sorting_books +task_id: 0 +online_sampling: false +target_dist_min: 1.0 +target_dist_max: 10.0 +goal_format: polar +task_obs_dim: 4 + +# reward +reward_type: l2 +success_reward: 10.0 +potential_reward_weight: 1.0 +collision_reward_weight: -0.1 + +# discount factor +discount_factor: 0.99 + +# termination condition +dist_tol: 0.36 # body width +max_step: 500 +max_collisions_allowed: 500 + +# misc config +initial_pos_z_offset: 
0.1 +collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links + +# sensor spec +output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs] +# image +fisheye: false +image_width: 1280 +image_height: 1280 +vertical_fov: 120 +# depth +depth_low: 0.0 +depth_high: 10.0 + +# sensor noise +depth_noise_rate: 0.0 +scan_noise_rate: 0.0 + +# visual objects +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false + +# speed limit +hand_threshold: 0.4 +hand_speed: 0.3 +body_speed: 0.3 diff --git a/igibson/configs/turtlebot_static_nav.yaml b/igibson/examples/configs/turtlebot_demo.yaml similarity index 81% rename from igibson/configs/turtlebot_static_nav.yaml rename to igibson/examples/configs/turtlebot_demo.yaml index 6fcaac35a..c8fa5bfb2 100644 --- a/igibson/configs/turtlebot_static_nav.yaml +++ b/igibson/examples/configs/turtlebot_demo.yaml @@ -14,17 +14,9 @@ texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: Turtlebot - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: false - rendering_params: null - controller_config: - base: - name: DifferentialDriveController +robot: Turtlebot +is_discrete: false +velocity: 1.0 # task task: point_nav_random @@ -57,8 +49,8 @@ output: [task_obs, rgb, depth, scan] # ASUS Xtion PRO LIVE # https://www.asus.com/us/3D-Sensor/Xtion_PRO_LIVE fisheye: false -image_width: 640 -image_height: 480 +image_width: 160 +image_height: 120 vertical_fov: 45 # depth depth_low: 0.8 @@ -79,5 +71,5 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true -visible_path: false +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/configs/turtlebot_dynamic_nav.yaml b/igibson/examples/configs/turtlebot_dynamic_nav.yaml similarity index 83% rename from igibson/configs/turtlebot_dynamic_nav.yaml rename to igibson/examples/configs/turtlebot_dynamic_nav.yaml index 7c01c0326..43c7322dd 100644 --- a/igibson/configs/turtlebot_dynamic_nav.yaml +++ b/igibson/examples/configs/turtlebot_dynamic_nav.yaml @@ -14,17 +14,9 @@ texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: Turtlebot - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: true - rendering_params: null - controller_config: - base: - name: DifferentialDriveController +robot: Turtlebot +is_discrete: false +velocity: 1.0 # task task: dynamic_nav_random @@ -79,6 +71,5 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true -visible_path: false - +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/configs/turtlebot_interactive_nav.yaml b/igibson/examples/configs/turtlebot_interactive_nav.yaml similarity index 83% rename from igibson/configs/turtlebot_interactive_nav.yaml rename to igibson/examples/configs/turtlebot_interactive_nav.yaml index 191e088be..f159820d3 100644 --- a/igibson/configs/turtlebot_interactive_nav.yaml +++ b/igibson/examples/configs/turtlebot_interactive_nav.yaml @@ -14,17 +14,9 @@ texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: Turtlebot - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: true - rendering_params: null - controller_config: - base: - name: DifferentialDriveController +robot: Turtlebot +is_discrete: false +velocity: 1.0 
# task task: interactive_nav_random @@ -79,6 +71,5 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true -visible_path: false - +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/configs/turtlebot_nav.yaml b/igibson/examples/configs/turtlebot_point_nav.yaml similarity index 81% rename from igibson/configs/turtlebot_nav.yaml rename to igibson/examples/configs/turtlebot_point_nav.yaml index af1300738..3e154ec35 100644 --- a/igibson/configs/turtlebot_nav.yaml +++ b/igibson/examples/configs/turtlebot_point_nav.yaml @@ -14,17 +14,9 @@ texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: Turtlebot - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: false - rendering_params: null - controller_config: - base: - name: DifferentialDriveController +robot: Turtlebot +is_discrete: false +velocity: 1.0 # task task: point_nav_random @@ -57,8 +49,8 @@ output: [task_obs, rgb, depth, scan] # ASUS Xtion PRO LIVE # https://www.asus.com/us/3D-Sensor/Xtion_PRO_LIVE fisheye: false -image_width: 640 -image_height: 480 +image_width: 160 +image_height: 120 vertical_fov: 45 # depth depth_low: 0.8 @@ -79,5 +71,5 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true -visible_path: false +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/configs/turtlebot_stadium_nav.yaml b/igibson/examples/configs/turtlebot_point_nav_stadium.yaml similarity index 83% rename from igibson/configs/turtlebot_stadium_nav.yaml rename to igibson/examples/configs/turtlebot_point_nav_stadium.yaml index 0ca201411..ee002a49d 100644 --- a/igibson/configs/turtlebot_stadium_nav.yaml +++ b/igibson/examples/configs/turtlebot_point_nav_stadium.yaml @@ -13,17 +13,9 @@ texture_randomization_freq: null object_randomization_freq: null # robot -robot: - name: Turtlebot - action_type: continuous - action_normalize: true - base_name: null - scale: 1.0 - self_collision: true - rendering_params: null - controller_config: - base: - name: DifferentialDriveController +robot: Turtlebot +is_discrete: false +velocity: 1.0 # task task: point_nav_random @@ -78,6 +70,5 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true -visible_path: false - +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/examples/configs/wbm3_modifiable_full_obs.yaml b/igibson/examples/configs/wbm3_modifiable_full_obs.yaml new file mode 100644 index 000000000..fb427ef67 --- /dev/null +++ b/igibson/examples/configs/wbm3_modifiable_full_obs.yaml @@ -0,0 +1,66 @@ +# scene +scene_id: scene_placeholder +clutter: false +build_graph: true +load_texture: true +pybullet_load_texture: true +should_open_all_doors: true + +# domain randomization +texture_randomization_freq: null +object_randomization_freq: null + +# robot +robot: BehaviorRobot + +# task +# task: cleaning_cupboards +task: task_placeholder +task_id: 0 +online_sampling: online_sampling_placeholder +target_dist_min: 1.0 +target_dist_max: 10.0 +goal_format: polar +task_obs_dim: 4 + +# reward +reward_type: l2 +success_reward: 10.0 +potential_reward_weight: 1.0 +collision_reward_weight: -0.1 + +# discount factor +discount_factor: 0.99 + +# termination condition +dist_tol: 0.36 # body width +max_step: 500 +max_collisions_allowed: 500 + +# misc config +initial_pos_z_offset: 0.1 
+collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links + +# sensor spec +output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs] +# image +fisheye: false +image_width: 1280 +image_height: 1280 +vertical_fov: 120 +# depth +depth_low: 0.0 +depth_high: 10.0 + +# sensor noise +depth_noise_rate: 0.0 +scan_noise_rate: 0.0 + +# visual objects +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false + +# speed limit +hand_threshold: 0.4 +hand_speed: 0.3 +body_speed: 0.3 diff --git a/igibson/examples/configs/wbm3_modifiable_full_obs.yaml_tmp b/igibson/examples/configs/wbm3_modifiable_full_obs.yaml_tmp new file mode 100644 index 000000000..ddddc5a1d --- /dev/null +++ b/igibson/examples/configs/wbm3_modifiable_full_obs.yaml_tmp @@ -0,0 +1,66 @@ +# scene +scene_id: Ihlen_1_int +clutter: false +build_graph: true +load_texture: true +pybullet_load_texture: true +should_open_all_doors: true + +# domain randomization +texture_randomization_freq: null +object_randomization_freq: null + +# robot +robot: BehaviorRobot + +# task +# task: cleaning_cupboards +task: throwing_away_leftovers +task_id: 0 +online_sampling: False +target_dist_min: 1.0 +target_dist_max: 10.0 +goal_format: polar +task_obs_dim: 4 + +# reward +reward_type: l2 +success_reward: 10.0 +potential_reward_weight: 1.0 +collision_reward_weight: -0.1 + +# discount factor +discount_factor: 0.99 + +# termination condition +dist_tol: 0.36 # body width +max_step: 500 +max_collisions_allowed: 500 + +# misc config +initial_pos_z_offset: 0.1 +collision_ignore_link_a_ids: [0, 1, 2] # ignore collisions with these robot links + +# sensor spec +output: [proprioception, rgb, highlight, depth, seg, ins_seg, task_obs] +# image +fisheye: false +image_width: 1280 +image_height: 1280 +vertical_fov: 120 +# depth +depth_low: 0.0 +depth_high: 10.0 + +# sensor noise +depth_noise_rate: 0.0 +scan_noise_rate: 0.0 + +# visual objects +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false + +# speed limit +hand_threshold: 0.4 +hand_speed: 0.3 +body_speed: 0.3 diff --git a/igibson/examples/object_states/__init__.py b/igibson/examples/demo/__init__.py similarity index 100% rename from igibson/examples/object_states/__init__.py rename to igibson/examples/demo/__init__.py diff --git a/igibson/examples/demo/cleaning_demo.py b/igibson/examples/demo/cleaning_demo.py new file mode 100644 index 000000000..f6a8e597e --- /dev/null +++ b/igibson/examples/demo/cleaning_demo.py @@ -0,0 +1,43 @@ +from igibson import object_states +from igibson.object_states.factory import prepare_object_states +from igibson.objects.ycb_object import YCBObject +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator + + +def main(): + s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0) + scene = InteractiveIndoorScene("Rs_int", texture_randomization=False, object_randomization=False) + s.import_ig_scene(scene) + + block = YCBObject(name="036_wood_block") + block.abilities = ["soakable", "cleaningTool"] + prepare_object_states(block, abilities={"soakable": {}, "cleaningTool": {}}) + s.import_object(block) + block.set_position([1, 1, 1.8]) + + # Set everything that can go dirty. 
+ stateful_objects = set( + scene.get_objects_with_state(object_states.Dusty) + + scene.get_objects_with_state(object_states.Stained) + + scene.get_objects_with_state(object_states.WaterSource) + ) + for obj in stateful_objects: + if object_states.Dusty in obj.states: + obj.states[object_states.Dusty].set_value(True) + + if object_states.Stained in obj.states: + obj.states[object_states.Stained].set_value(True) + + if object_states.WaterSource in obj.states and object_states.ToggledOn in obj.states: + obj.states[object_states.ToggledOn].set_value(True) + + try: + while True: + s.step() + finally: + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/cleaning_demo_loader.py b/igibson/examples/demo/cleaning_demo_loader.py new file mode 100644 index 000000000..f82080708 --- /dev/null +++ b/igibson/examples/demo/cleaning_demo_loader.py @@ -0,0 +1,26 @@ +from igibson import object_states +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator + + +def main(): + s = Simulator(mode="gui", device_idx=0) + scene = InteractiveIndoorScene("Rs_int", urdf_file="cleaning_demo", merge_fixed_links=True) + s.import_ig_scene(scene) + + water_sources = set(scene.get_objects_with_state(object_states.WaterSource)) & set( + scene.get_objects_with_state(object_states.ToggledOn) + ) + for obj in water_sources: + obj.states[object_states.ToggledOn].set_value(True) + + # Let the user view the frozen scene in the UI for purposes of comparison. + try: + while True: + pass + finally: + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/cleaning_demo_saver.py b/igibson/examples/demo/cleaning_demo_saver.py new file mode 100644 index 000000000..4e90137aa --- /dev/null +++ b/igibson/examples/demo/cleaning_demo_saver.py @@ -0,0 +1,48 @@ +from igibson import object_states +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator + + +def main(): + s = Simulator(mode="gui", device_idx=0) + scene = InteractiveIndoorScene( + "Rs_int", texture_randomization=False, object_randomization=False, merge_fixed_links=False + ) + s.import_ig_scene(scene) + + # Set everything that can go dirty. + stateful_objects = set( + scene.get_objects_with_state(object_states.Dusty) + + scene.get_objects_with_state(object_states.Stained) + + scene.get_objects_with_state(object_states.WaterSource) + + scene.get_objects_with_state(object_states.Open) + ) + for obj in stateful_objects: + if object_states.Dusty in obj.states: + obj.states[object_states.Dusty].set_value(True) + + if object_states.Stained in obj.states: + obj.states[object_states.Stained].set_value(True) + + if object_states.Open in obj.states: + obj.states[object_states.Open].set_value(True) + + if object_states.WaterSource in obj.states: + obj.states[object_states.ToggledOn].set_value(True) + + # Take some steps so water drops appear + for i in range(100): + s.step() + + scene.save_modified_urdf("cleaning_demo") + + # Let the user view the frozen scene in the UI for purposes of comparison. 
+ try: + while True: + pass + finally: + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/demo_static.py b/igibson/examples/demo/demo_static.py new file mode 100644 index 000000000..a2f71bda1 --- /dev/null +++ b/igibson/examples/demo/demo_static.py @@ -0,0 +1,33 @@ +import os + +import igibson +from igibson.robots.turtlebot_robot import Turtlebot +from igibson.scenes.gibson_indoor_scene import StaticIndoorScene +from igibson.simulator import Simulator +from igibson.utils.assets_utils import download_assets, download_demo_data +from igibson.utils.utils import parse_config + + +class DemoStatic(object): + def __init__(self): + download_assets() + download_demo_data() + + def run_demo(self): + config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_demo.yaml")) + + s = Simulator(mode="gui", image_width=700, image_height=700) + scene = StaticIndoorScene("Rs", pybullet_load_texture=True) + s.import_scene(scene) + turtlebot = Turtlebot(config) + s.import_robot(turtlebot) + + for i in range(1000): + turtlebot.apply_action([0.1, 0.5]) + s.step() + + s.disconnect() + + +if __name__ == "__main__": + DemoStatic().run_demo() diff --git a/igibson/examples/demo/env_example.py b/igibson/examples/demo/env_example.py new file mode 100644 index 000000000..4c665e32b --- /dev/null +++ b/igibson/examples/demo/env_example.py @@ -0,0 +1,25 @@ +import logging +import os + +import igibson +from igibson.envs.igibson_env import iGibsonEnv +from igibson.render.profiler import Profiler + + +def main(): + config_filename = os.path.join(igibson.example_config_path, "turtlebot_demo.yaml") + env = iGibsonEnv(config_file=config_filename, mode="gui") + for j in range(10): + env.reset() + for i in range(100): + with Profiler("Environment action step"): + action = env.action_space.sample() + state, reward, done, info = env.step(action) + if done: + logging.info("Episode finished after {} timesteps".format(i + 1)) + break + env.close() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/env_interactive_example.py b/igibson/examples/demo/env_interactive_example.py new file mode 100644 index 000000000..e5e68f471 --- /dev/null +++ b/igibson/examples/demo/env_interactive_example.py @@ -0,0 +1,25 @@ +import logging +import os + +import igibson +from igibson.envs.igibson_env import iGibsonEnv +from igibson.render.profiler import Profiler + + +def main(): + config_filename = os.path.join(igibson.example_config_path, "turtlebot_point_nav.yaml") + env = iGibsonEnv(config_file=config_filename, mode="gui") + for j in range(10): + env.reset() + for i in range(100): + with Profiler("Environment action step"): + action = env.action_space.sample() + state, reward, done, info = env.step(action) + if done: + logging.info("Episode finished after {} timesteps".format(i + 1)) + break + env.close() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/robots/fetch_gripper_robot_example.py b/igibson/examples/demo/fetch_gripper_robot_example.py similarity index 84% rename from igibson/examples/robots/fetch_gripper_robot_example.py rename to igibson/examples/demo/fetch_gripper_robot_example.py index 38b6235e3..f1f49897f 100644 --- a/igibson/examples/robots/fetch_gripper_robot_example.py +++ b/igibson/examples/demo/fetch_gripper_robot_example.py @@ -5,21 +5,21 @@ from IPython import embed import igibson -from igibson.robots.fetch import Fetch +from igibson.robots.fetch_gripper_robot import FetchGripper from igibson.scenes.empty_scene import EmptyScene 
from igibson.simulator import Simulator from igibson.utils.utils import parse_config if __name__ == "__main__": - s = Simulator(mode="headless", use_pb_gui=True) + s = Simulator(mode="pbgui") scene = EmptyScene() s.import_scene(scene) config = parse_config(os.path.join(igibson.example_config_path, "behavior_onboard_sensing_fetch.yaml")) - robot = Fetch(s, config) + robot = FetchGripper(s, config) s.import_robot(robot) - robot.reset() + robot.robot_specific_reset() action_dim = 11 for i in range(action_dim): embed() diff --git a/igibson/examples/demo/igsdf_example.py b/igibson/examples/demo/igsdf_example.py new file mode 100644 index 000000000..3a04894c1 --- /dev/null +++ b/igibson/examples/demo/igsdf_example.py @@ -0,0 +1,26 @@ +import argparse + +from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings +from igibson.render.profiler import Profiler +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--scene", type=str, help="Name of the scene in the iG Dataset") + args = parser.parse_args() + settings = MeshRendererSettings(enable_shadow=True, msaa=False) + s = Simulator(mode="gui", image_width=256, image_height=256, rendering_settings=settings) + + scene = InteractiveIndoorScene(args.scene) + s.import_ig_scene(scene) + + for i in range(10000): + with Profiler("Simulator step"): + s.step() + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/objects/obj_rotating_video_gen.py b/igibson/examples/demo/igsdf_obj_viz.py similarity index 87% rename from igibson/examples/objects/obj_rotating_video_gen.py rename to igibson/examples/demo/igsdf_obj_viz.py index b591c6bbe..41193f639 100644 --- a/igibson/examples/objects/obj_rotating_video_gen.py +++ b/igibson/examples/demo/igsdf_obj_viz.py @@ -1,16 +1,16 @@ -import argparse -import logging import os import subprocess +import sys import numpy as np from PIL import Image import igibson + +# from igibson.utils.assets_utils import get_model_path from igibson.objects.articulated_object import ArticulatedObject from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings from igibson.render.profiler import Profiler -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene from igibson.simulator import Simulator @@ -77,20 +77,9 @@ def load_obj_np( def main(): - """ - Generates videos rotating a camera around an object - """ - parser = argparse.ArgumentParser() - parser.add_argument( - "--model_path", - default=os.path.join(igibson.ig_dataset_path, "objects", "rocking_chair", "rocking_chair_0002"), - help="Model to load", - ) - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) global _mouse_ix, _mouse_iy, down, view_direction - args = parser.parse_args() - model_path = args.model_path + model_path = sys.argv[1] print(model_path) model_id = os.path.basename(model_path) @@ -102,10 +91,6 @@ def main(): ) s = Simulator(mode="headless", image_width=1800, image_height=1200, vertical_fov=70, rendering_settings=settings) - scene = InteractiveIndoorScene( - "Rs_int", texture_randomization=False, load_object_categories=["window"], object_randomization=False - ) - s.import_scene(scene) s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0]) diff --git a/igibson/examples/scenes/scene_tour_video_gen.py b/igibson/examples/demo/igsdf_tour.py similarity index 88% rename from 
igibson/examples/scenes/scene_tour_video_gen.py rename to igibson/examples/demo/igsdf_tour.py index 9edc4eb6c..71025526d 100644 --- a/igibson/examples/scenes/scene_tour_video_gen.py +++ b/igibson/examples/demo/igsdf_tour.py @@ -1,9 +1,7 @@ import argparse -import logging import os import random import subprocess -from sys import platform import numpy as np from PIL import Image @@ -17,13 +15,8 @@ def main(): - """ - Generates videos navigating in the iG scenes - Loads an iG scene, predefined paths and produces a video. Alternate random textures and/or objects, on demand. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) parser = argparse.ArgumentParser() - parser.add_argument("--scene", type=str, help="Name of the scene in the iG Dataset", default="Rs_int") + parser.add_argument("--scene", type=str, help="Name of the scene in the iG Dataset") parser.add_argument("--save_dir", type=str, help="Directory to save the frames.", default="misc") parser.add_argument("--seed", type=int, default=15, help="Random seed.") parser.add_argument("--domain_rand", dest="domain_rand", action="store_true") @@ -48,7 +41,6 @@ def main(): msaa=True, skybox_size=36.0, light_dimming_factor=0.8, - texture_scale=0.5 if platform == "darwin" else 1.0, # Reduce scale if in Mac ) s = Simulator(mode="headless", image_width=1080, image_height=720, vertical_fov=60, rendering_settings=settings) @@ -58,9 +50,8 @@ def main(): args.scene, texture_randomization=args.domain_rand, object_randomization=args.object_rand ) - s.import_scene(scene) + s.import_ig_scene(scene) - # Load trajectory path traj_path = os.path.join(get_ig_scene_path(args.scene), "misc", "tour_cam_trajectory.txt") save_dir = os.path.join(get_ig_scene_path(args.scene), args.save_dir) os.makedirs(save_dir, exist_ok=True) diff --git a/igibson/examples/robots/ik_example.py b/igibson/examples/demo/ik_example.py similarity index 94% rename from igibson/examples/robots/ik_example.py rename to igibson/examples/demo/ik_example.py index ac4d615dc..c9de39f7c 100644 --- a/igibson/examples/robots/ik_example.py +++ b/igibson/examples/demo/ik_example.py @@ -1,5 +1,3 @@ -import os - import numpy as np import pybullet as p @@ -13,7 +11,7 @@ set_joint_positions, ) from igibson.render.profiler import Profiler -from igibson.robots.fetch import Fetch +from igibson.robots.fetch_robot import Fetch from igibson.scenes.empty_scene import EmptyScene from igibson.simulator import Simulator from igibson.utils.utils import parse_config @@ -21,13 +19,13 @@ def main(): config = parse_config(os.path.join(igibson.example_config_path, "fetch_reaching.yaml")) - s = Simulator(mode="gui_interactive", use_pb_gui=True, physics_timestep=1 / 240.0) + s = Simulator(mode="gui", physics_timestep=1 / 240.0) scene = EmptyScene() s.import_scene(scene) fetch = Fetch(config) s.import_robot(fetch) - robot_id = fetch.get_body_id() + robot_id = fetch.robot_ids[0] arm_joints = joints_from_names( robot_id, @@ -92,7 +90,7 @@ def accurateCalculateInverseKinematics(robotid, endEffectorId, targetPos, thresh threshold = 0.01 maxIter = 100 joint_pos = accurateCalculateInverseKinematics( - robot_id, fetch.links["gripper_link"].body_part_index, [x, y, z], threshold, maxIter + robot_id, fetch.parts["gripper_link"].body_part_index, [x, y, z], threshold, maxIter )[2:10] s.step() diff --git a/igibson/examples/observations/generate_lidar_velodyne.py b/igibson/examples/demo/lidar_velodyne_example.py similarity index 53% rename from igibson/examples/observations/generate_lidar_velodyne.py rename to 
igibson/examples/demo/lidar_velodyne_example.py index 837cf25ad..82a9a28f1 100644 --- a/igibson/examples/observations/generate_lidar_velodyne.py +++ b/igibson/examples/demo/lidar_velodyne_example.py @@ -1,43 +1,33 @@ -import logging import os import matplotlib.pyplot as plt +import numpy as np from mpl_toolkits.mplot3d import Axes3D import igibson +from igibson.objects.ycb_object import YCBObject from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.robots.turtlebot import Turtlebot +from igibson.render.profiler import Profiler +from igibson.robots.turtlebot_robot import Turtlebot from igibson.scenes.gibson_indoor_scene import StaticIndoorScene from igibson.simulator import Simulator from igibson.utils.utils import parse_config def main(): - """ - Example of rendering and visualizing velodyne lidar signals - Loads Rs (non interactive) and a robot and renders a velodyne signal from the robot's camera - It plots the velodyne point cloud with matplotlib - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")) - settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.01) + config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_demo.yaml")) + settings = MeshRendererSettings() s = Simulator(mode="headless", image_width=256, image_height=256, rendering_settings=settings) - scene = StaticIndoorScene("Rs", build_graph=True) + scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=True) s.import_scene(scene) - robot_config = config["robot"] - robot_config.pop("name") - turtlebot = Turtlebot(**robot_config) + turtlebot = Turtlebot(config) s.import_robot(turtlebot) turtlebot.apply_action([0.1, -0.1]) s.step() - - # Get velodyne lidar lidar = s.renderer.get_lidar_all() - logging.info("Dimensions of the lidar observation: {}".format(lidar.shape)) - - # Visualize velodyne lidar + print(lidar.shape) fig = plt.figure() ax = Axes3D(fig) ax.scatter(lidar[:, 0], lidar[:, 1], lidar[:, 2], s=3) diff --git a/igibson/examples/renderer/mesh_renderer_example.py b/igibson/examples/demo/mesh_renderer_example.py similarity index 68% rename from igibson/examples/renderer/mesh_renderer_example.py rename to igibson/examples/demo/mesh_renderer_example.py index cce5a47c7..64c296ad5 100644 --- a/igibson/examples/renderer/mesh_renderer_example.py +++ b/igibson/examples/demo/mesh_renderer_example.py @@ -1,4 +1,3 @@ -import logging import os import sys @@ -11,39 +10,31 @@ def main(): - """ - Creates renderer and renders RGB images in Rs (no interactive). No physics. - The camera view can be controlled. 
- """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) global _mouse_ix, _mouse_iy, down, view_direction - # If a model is given, we load it, otherwise we load Rs mesh (non interactive) if len(sys.argv) > 1: model_path = sys.argv[1] else: model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj") - # Create renderer object and load the scene model renderer = MeshRenderer(width=512, height=512) renderer.load_object(model_path) - renderer.add_instance_group([0]) - # Print some information about the loaded model - logging.info(renderer.visual_objects, renderer.instances) - logging.info(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx) + renderer.add_instance(0) + print(renderer.visual_objects, renderer.instances) + print(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx) - # Create a simple viewer with OpenCV and a keyboard navigation px = 0 py = 0.2 + camera_pose = np.array([px, py, 0.5]) view_direction = np.array([0, -1, -1]) renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1]) renderer.set_fov(90) + _mouse_ix, _mouse_iy = -1, -1 down = False - # Define the function callback for OpenCV events on the window def change_dir(event, x, y, flags, param): global _mouse_ix, _mouse_iy, down, view_direction if event == cv2.EVENT_LBUTTONDOWN: @@ -61,14 +52,13 @@ def change_dir(event, x, y, flags, param): elif event == cv2.EVENT_LBUTTONUP: down = False - cv2.namedWindow("Viewer") - cv2.setMouseCallback("Viewer", change_dir) + cv2.namedWindow("test") + cv2.setMouseCallback("test", change_dir) - # Move camera and render while True: with Profiler("Render"): frame = renderer.render(modes=("rgb")) - cv2.imshow("Viewer", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR)) + cv2.imshow("test", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR)) q = cv2.waitKey(1) if q == ord("w"): px += 0.01 @@ -83,7 +73,14 @@ def change_dir(event, x, y, flags, param): camera_pose = np.array([px, py, 0.5]) renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1]) - # Cleanup + # start = time.time() + + # for i in range(100): + # frame = renderer.render(modes=('rgb', 'normal', '3d')) + + # elapsed = time.time() - start + # print("num objects {} fps {}".format(len(renderer.instances), 100/elapsed)) + renderer.release() diff --git a/igibson/examples/demo/mesh_renderer_example_pano.py b/igibson/examples/demo/mesh_renderer_example_pano.py new file mode 100644 index 000000000..4b1d4a6e8 --- /dev/null +++ b/igibson/examples/demo/mesh_renderer_example_pano.py @@ -0,0 +1,45 @@ +import os +import sys + +import matplotlib.pyplot as plt +import numpy as np + +from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer +from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings +from igibson.utils.assets_utils import get_scene_path + + +def main(): + global _mouse_ix, _mouse_iy, down, view_direction + + if len(sys.argv) > 1: + model_path = sys.argv[1] + else: + model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj") + + settings = MeshRendererSettings(enable_pbr=False) + renderer = MeshRenderer(width=512, height=512, rendering_settings=settings) + renderer.load_object(model_path) + + renderer.add_instance(0) + print(renderer.visual_objects, renderer.instances) + print(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx) + + px = 0 + py = 0.2 + + camera_pose = np.array([px, py, 0.5]) + view_direction = np.array([0, -1, 
-0.3]) + renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1]) + renderer.set_fov(90) + + img = renderer.get_equi() + print(img.shape) + plt.imshow(img) + plt.show() + + renderer.release() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/renderer/mesh_renderer_pbr_example.py b/igibson/examples/demo/mesh_renderer_example_pbr.py similarity index 77% rename from igibson/examples/renderer/mesh_renderer_pbr_example.py rename to igibson/examples/demo/mesh_renderer_example_pbr.py index 43db4e770..d119e6d7c 100644 --- a/igibson/examples/renderer/mesh_renderer_pbr_example.py +++ b/igibson/examples/demo/mesh_renderer_example_pbr.py @@ -1,11 +1,9 @@ -import logging import os import sys import cv2 import numpy as np -import igibson from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings from igibson.render.profiler import Profiler @@ -13,8 +11,7 @@ def load_obj_np(filename_obj, normalization=False, texture_size=4, texture_wrapping="REPEAT", use_bilinear=True): - """ - Load Wavefront OBJ file into numpy array + """Load Wavefront .obj file into numpy array This function only supports vertices (v x x x) and faces (f x x x). """ # load vertices @@ -43,62 +40,57 @@ def load_obj_np(filename_obj, normalization=False, texture_size=4, texture_wrapp v2 = int(vs[i + 2].split("/")[0]) faces.append((v0, v1, v2)) faces = np.vstack(faces).astype(np.int32) - 1 - assert normalization is False + + assert normalization is False # Since I commented out the block below + return vertices, faces def main(): - """ - Minimal example of use of the PBR renderer. Loads Rs_int (interactive) or one object. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) global _mouse_ix, _mouse_iy, down, view_direction if len(sys.argv) > 1: model_path = sys.argv[1] else: - model_path = os.path.join(igibson.ig_dataset_path, "scenes", "Rs_int", "shape", "visual") + model_path = os.path.join(get_scene_path("Rs_int"), "mesh_z_up.obj") settings = MeshRendererSettings(msaa=True, enable_shadow=True) renderer = MeshRenderer(width=512, height=512, vertical_fov=70, rendering_settings=settings) renderer.set_light_position_direction([0, 0, 10], [0, 0, 0]) i = 0 - v = [] - # If a model path is given and it is an OBJ file we load it + v = [] for fn in os.listdir(model_path): if fn.endswith("obj"): vertices, faces = load_obj_np(os.path.join(model_path, fn)) v.append(vertices) v = np.vstack(v) - logging.info("Number of vertices {}".format(v.shape)) + print(v.shape) xlen = np.max(v[:, 0]) - np.min(v[:, 0]) ylen = np.max(v[:, 1]) - np.min(v[:, 1]) scale = 2.0 / (max(xlen, ylen)) - # If a model path is given and it is an OBJ file we load it for fn in os.listdir(model_path): if fn.endswith("obj"): renderer.load_object(os.path.join(model_path, fn), scale=[scale, scale, scale]) - renderer.add_instance_group([i]) + renderer.add_instance(i) i += 1 - # Logging some info - logging.info(renderer.visual_objects, renderer.instances) - logging.info(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx) + print(renderer.visual_objects, renderer.instances) + print(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx) - # Create a simple viewer with OpenCV px = 1 py = 1 pz = 1 + camera_pose = np.array([px, py, pz]) view_direction = np.array([-1, -1, -1]) renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1]) + _mouse_ix, _mouse_iy = -1, -1 
down = False - # Callback function for OpenCV viewer def change_dir(event, x, y, flags, param): global _mouse_ix, _mouse_iy, down, view_direction if event == cv2.EVENT_LBUTTONDOWN: @@ -116,14 +108,13 @@ def change_dir(event, x, y, flags, param): elif event == cv2.EVENT_LBUTTONUP: down = False - cv2.namedWindow("Viewer") - cv2.setMouseCallback("Viewer", change_dir) + cv2.namedWindow("test") + cv2.setMouseCallback("test", change_dir) - # Move camera and render while True: with Profiler("Render"): frame = renderer.render(modes=("rgb", "normal", "seg", "ins_seg")) - cv2.imshow("Viewer", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR)) + cv2.imshow("test", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR)) q = cv2.waitKey(1) if q == ord("w"): diff --git a/igibson/examples/renderer/mesh_renderer_gpu_example.py b/igibson/examples/demo/mesh_renderer_gpu_example.py similarity index 87% rename from igibson/examples/renderer/mesh_renderer_gpu_example.py rename to igibson/examples/demo/mesh_renderer_gpu_example.py index bd9af9369..201a9c93c 100644 --- a/igibson/examples/renderer/mesh_renderer_gpu_example.py +++ b/igibson/examples/demo/mesh_renderer_gpu_example.py @@ -1,5 +1,4 @@ import os -import platform import sys import matplotlib.pyplot as plt @@ -11,10 +10,6 @@ def main(): - if platform.system() != "Linux": - print("Rendering to pytorch tensor is only available on Linux.") - exit(1) - if len(sys.argv) > 1: model_path = sys.argv[1] else: @@ -22,7 +17,7 @@ def main(): renderer = MeshRendererG2G(width=512, height=512, device_idx=0) renderer.load_object(model_path) - renderer.add_instance_group([0]) + renderer.add_instance(0) print(renderer.visual_objects, renderer.instances) print(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx) diff --git a/igibson/examples/renderer/mesh_renderer_simple_example.py b/igibson/examples/demo/mesh_renderer_simple_example.py similarity index 60% rename from igibson/examples/renderer/mesh_renderer_simple_example.py rename to igibson/examples/demo/mesh_renderer_simple_example.py index 20aff6f7d..e33d47bd5 100644 --- a/igibson/examples/renderer/mesh_renderer_simple_example.py +++ b/igibson/examples/demo/mesh_renderer_simple_example.py @@ -1,4 +1,3 @@ -import logging import os import sys @@ -10,13 +9,6 @@ def main(): - """ - Minimal example of use of the renderer. Loads Rs (non interactive), renders one set of images (RGB, normals, - 3D points (as depth)), shows them. 
- """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - - # If a model is given, we load it, otherwise we load Rs mesh (non interactive) if len(sys.argv) > 1: model_path = sys.argv[1] else: @@ -24,18 +16,12 @@ def main(): renderer = MeshRenderer(width=512, height=512) renderer.load_object(model_path) - renderer.add_instance_group([0]) + renderer.add_instance(0) camera_pose = np.array([0, 0, 1.2]) view_direction = np.array([1, 0, 0]) renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1]) renderer.set_fov(90) frames = renderer.render(modes=("rgb", "normal", "3d")) - - # Render 3d points as depth map - depth = np.linalg.norm(frames[2][:, :, :3], axis=2) - depth /= depth.max() - frames[2][:, :, :3] = depth[..., None] - frames = cv2.cvtColor(np.concatenate(frames, axis=1), cv2.COLOR_RGB2BGR) cv2.imshow("image", frames) cv2.waitKey(0) diff --git a/igibson/examples/robots/motion_planning_example.py b/igibson/examples/demo/motion_planning_example.py similarity index 83% rename from igibson/examples/robots/motion_planning_example.py rename to igibson/examples/demo/motion_planning_example.py index b37feab3e..81a17bb2a 100644 --- a/igibson/examples/robots/motion_planning_example.py +++ b/igibson/examples/demo/motion_planning_example.py @@ -32,9 +32,9 @@ def run_example(args): parser.add_argument( "--mode", "-m", - choices=["headless", "headless_tensor", "gui_non_interactive", "gui_interactive"], - default="gui_interactive", - help="which mode for simulation (default: gui_interactive)", + choices=["headless", "gui", "iggui"], + default="iggui", + help="which mode for simulation (default: iggui)", ) args = parser.parse_args() diff --git a/igibson/examples/demo/mouse_interaction.py b/igibson/examples/demo/mouse_interaction.py new file mode 100644 index 000000000..ef072e264 --- /dev/null +++ b/igibson/examples/demo/mouse_interaction.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python +import argparse +import os +import time + +import matplotlib.pyplot as plt +import numpy as np +import pybullet as p + +import igibson +from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator +from igibson.utils.assets_utils import get_3dfront_scene_path, get_cubicasa_scene_path, get_ig_scene_path + +# human interaction demo + + +def test_import_igsdf(scene_name, scene_source): + hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr") + hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr") + + if scene_source == "IG": + scene_dir = get_ig_scene_path(scene_name) + elif scene_source == "CUBICASA": + scene_dir = get_cubicasa_scene_path(scene_name) + else: + scene_dir = get_3dfront_scene_path(scene_name) + + light_modulation_map_filename = os.path.join(scene_dir, "layout", "floor_lighttype_0.png") + background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg") + + scene = InteractiveIndoorScene( + scene_name, texture_randomization=False, object_randomization=False, scene_source=scene_source + ) + + settings = MeshRendererSettings( + env_texture_filename=hdr_texture, + env_texture_filename2=hdr_texture2, + env_texture_filename3=background_texture, + light_modulation_map_filename=light_modulation_map_filename, + enable_shadow=True, + msaa=True, + light_dimming_factor=1.0, + optimized=True, + ) + s = Simulator(mode="iggui", image_width=960, 
image_height=720, device_idx=0, rendering_settings=settings) + + s.import_ig_scene(scene) + fpss = [] + + np.random.seed(0) + _, (px, py, pz) = scene.get_random_point() + s.viewer.px = px + s.viewer.py = py + s.viewer.pz = 1.7 + s.viewer.update() + + for i in range(3000): + if i == 2500: + logId = p.startStateLogging(loggingType=p.STATE_LOGGING_PROFILE_TIMINGS, fileName="trace_beechwood") + start = time.time() + s.step() + end = time.time() + print("Elapsed time: ", end - start) + print("Frequency: ", 1 / (end - start)) + fpss.append(1 / (end - start)) + p.stopStateLogging(logId) + s.disconnect() + print("end") + + plt.plot(fpss) + plt.show() + + +def main(): + parser = argparse.ArgumentParser(description="Open a scene with iGibson interactive viewer.") + parser.add_argument("--scene", dest="scene_name", type=str, default="Rs_int", help="The name of the scene to load") + parser.add_argument( + "--source", + dest="scene_source", + type=str, + default="IG", + help="The name of the source dataset, among [IG,CUBICASA,THREEDFRONT]", + ) + args = parser.parse_args() + test_import_igsdf(args.scene_name, args.scene_source) + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/object_example.py b/igibson/examples/demo/object_example.py new file mode 100644 index 000000000..f6dca1f68 --- /dev/null +++ b/igibson/examples/demo/object_example.py @@ -0,0 +1,43 @@ +import os +import time + +import pybullet as p +import pybullet_data + +import igibson +from igibson.objects.articulated_object import ArticulatedObject +from igibson.objects.ycb_object import YCBObject + + +def main(): + p.connect(p.GUI) + p.setGravity(0, 0, -9.8) + p.setTimeStep(1.0 / 240.0) + + floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml") + p.loadMJCF(floor) + + cabinet_0007 = os.path.join(igibson.assets_path, "models/cabinet2/cabinet_0007.urdf") + cabinet_0004 = os.path.join(igibson.assets_path, "models/cabinet/cabinet_0004.urdf") + + obj1 = ArticulatedObject(filename=cabinet_0007) + obj1.load() + obj1.set_position([0, 0, 0.5]) + + obj2 = ArticulatedObject(filename=cabinet_0004) + obj2.load() + obj2.set_position([0, 0, 2]) + + obj3 = YCBObject("003_cracker_box") + obj3.load() + obj3.set_position_orientation([0, 0, 1.2], [0, 0, 0, 1]) + + for _ in range(24000): # at least 100 seconds + p.stepSimulation() + time.sleep(1.0 / 240.0) + + p.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/particle_demo.py b/igibson/examples/demo/particle_demo.py new file mode 100644 index 000000000..583ca988d --- /dev/null +++ b/igibson/examples/demo/particle_demo.py @@ -0,0 +1,78 @@ +import os + +import numpy as np + +from igibson import object_states +from igibson.objects.articulated_object import URDFObject +from igibson.scenes.empty_scene import EmptyScene +from igibson.simulator import Simulator +from igibson.utils.assets_utils import get_ig_model_path + + +def main(): + s = Simulator(mode="iggui", image_width=1280, image_height=720) + + scene = EmptyScene() + s.import_scene(scene) + + model_path = os.path.join(get_ig_model_path("sink", "sink_1"), "sink_1.urdf") + + sink = URDFObject( + filename=model_path, + category="sink", + name="sink_1", + scale=np.array([0.8, 0.8, 0.8]), + abilities={"toggleable": {}, "waterSource": {}}, + ) + + s.import_object(sink) + sink.set_position([1, 1, 0.8]) + sink.states[object_states.ToggledOn].set_value(True) + + model_path = get_ig_model_path("scrub_brush", "scrub_brush_000") + model_filename = os.path.join(model_path, 
"scrub_brush_000.urdf") + max_bbox = [0.1, 0.1, 0.1] + avg = {"size": max_bbox, "density": 67.0} + brush = URDFObject( + filename=model_filename, + category="scrub_brush", + name="scrub_brush", + avg_obj_dims=avg, + fit_avg_dim_volume=True, + model_path=model_path, + ) + s.import_object(brush) + brush.set_position([0, -2, 0.4]) + + model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf") + desk = URDFObject( + filename=model_path, + category="table", + name="19898", + scale=np.array([0.8, 0.8, 0.8]), + abilities={"dustyable": {}, "stainable": {}}, + ) + + print(desk.states.keys()) + s.import_object(desk) + desk.set_position([1, -2, 0.4]) + assert desk.states[object_states.Dusty].set_value(True) + assert desk.states[object_states.Stained].set_value(True) + + model_path = os.path.join(get_ig_model_path("bowl", "68_0"), "68_0.urdf") + bowl = URDFObject(filename=model_path, category="bowl", name="bowl", abilities={"dustyable": {}, "stainable": {}}) + s.import_object(bowl) + assert bowl.states[object_states.OnTop].set_value(desk, True, use_ray_casting_method=True) + assert bowl.states[object_states.Dusty].set_value(True) + assert bowl.states[object_states.Stained].set_value(True) + # Main simulation loop + try: + while True: + s.step() + print("Dirty: ", desk.states[object_states.Dusty].get_value()) + finally: + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/particle_reset_demo.py b/igibson/examples/demo/particle_reset_demo.py new file mode 100644 index 000000000..bb3db0714 --- /dev/null +++ b/igibson/examples/demo/particle_reset_demo.py @@ -0,0 +1,86 @@ +import os + +import numpy as np +import pybullet as p + +from igibson import object_states +from igibson.objects.articulated_object import URDFObject +from igibson.scenes.empty_scene import EmptyScene +from igibson.simulator import Simulator +from igibson.utils.assets_utils import get_ig_model_path + + +def main(): + s = Simulator(mode="gui", image_width=1280, image_height=720) + + scene = EmptyScene() + s.import_scene(scene) + + model_path = os.path.join(get_ig_model_path("sink", "sink_1"), "sink_1.urdf") + + sink = URDFObject( + filename=model_path, + category="sink", + name="sink_1", + scale=np.array([0.8, 0.8, 0.8]), + abilities={"toggleable": {}, "waterSource": {}}, + ) + + s.import_object(sink) + sink.set_position([1, 1, 0.8]) + sink.states[object_states.ToggledOn].set_value(True) + + model_path = get_ig_model_path("vacuum", "vacuum_000") + model_filename = os.path.join(model_path, "vacuum_000.urdf") + max_bbox = [0.1, 0.1, 0.1] + avg = {"size": max_bbox, "density": 67.0} + brush = URDFObject( + filename=model_filename, category="vacuum", name="vacuum", avg_obj_dims=avg, fit_avg_dim_volume=True + ) + s.import_object(brush) + brush.set_position([0, -2, 0.4]) + + model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf") + desk = URDFObject( + filename=model_path, + category="table", + name="19898", + scale=np.array([0.8, 0.8, 0.8]), + abilities={"dustyable": {}}, + ) + + print(desk.states.keys()) + s.import_object(desk) + desk.set_position([1, -2, 0.4]) + desk.states[object_states.Dusty].set_value(True) + + # Dump the initial state. + state_dump = p.saveState() + dump = desk.dump_state() + print(dump) + + # Main simulation loop. + try: + # Keep stepping until we reach a clean state. 
+ while desk.states[object_states.Dusty].get_value(): + s.step() + if not desk.states[object_states.Dusty].get_value(): + print("Cleaned.") + print("Dirty: ", desk.states[object_states.Dusty].get_value()) + + # Reset to the initial state + p.restoreState(state_dump) + p.removeState(state_dump) + desk.load_state(dump) + desk.force_wakeup() + + # Continue simulation + while True: + s.step() + print("Dirty: ", desk.states[object_states.Dusty].get_value()) + finally: + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/physics_engine_example.py b/igibson/examples/demo/physics_engine_example.py new file mode 100644 index 000000000..8c77710b4 --- /dev/null +++ b/igibson/examples/demo/physics_engine_example.py @@ -0,0 +1,50 @@ +import os +import sys + +import pybullet as p + +import igibson +from igibson.utils.assets_utils import get_scene_path + + +def main(): + if len(sys.argv) > 1: + model_path = sys.argv[1] + else: + model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj") + + p.connect(p.GUI) + p.setGravity(0, 0, -9.8) + p.setTimeStep(1.0 / 240.0) + + # Load scenes + collision_id = p.createCollisionShape( + p.GEOM_MESH, fileName=model_path, meshScale=1.0, flags=p.GEOM_FORCE_CONCAVE_TRIMESH + ) + visual_id = p.createVisualShape(p.GEOM_MESH, fileName=model_path, meshScale=1.0) + + mesh_id = p.createMultiBody(baseCollisionShapeIndex=collision_id, baseVisualShapeIndex=visual_id) + + # Load robots + turtlebot_urdf = os.path.join(igibson.assets_path, "models/turtlebot/turtlebot.urdf") + robot_id = p.loadURDF(turtlebot_urdf, flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL) + + # Load objects + obj_visual_filename = os.path.join(igibson.assets_path, "models/ycb/002_master_chef_can/textured_simple.obj") + obj_collision_filename = os.path.join( + igibson.assets_path, "models/ycb/002_master_chef_can/textured_simple_vhacd.obj" + ) + collision_id = p.createCollisionShape(p.GEOM_MESH, fileName=obj_collision_filename, meshScale=1.0) + visual_id = p.createVisualShape(p.GEOM_MESH, fileName=obj_visual_filename, meshScale=1.0) + object_id = p.createMultiBody( + baseCollisionShapeIndex=collision_id, baseVisualShapeIndex=visual_id, basePosition=[1.0, 0.0, 1.0], baseMass=0.1 + ) + + for _ in range(10000): + p.stepSimulation() + + p.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/objects/sample_objects_logic.py b/igibson/examples/demo/ray_cast_sampling.py similarity index 70% rename from igibson/examples/objects/sample_objects_logic.py rename to igibson/examples/demo/ray_cast_sampling.py index 6421aead4..ad4bebeae 100644 --- a/igibson/examples/objects/sample_objects_logic.py +++ b/igibson/examples/demo/ray_cast_sampling.py @@ -1,4 +1,3 @@ -import logging import os import numpy as np @@ -6,7 +5,6 @@ import igibson from igibson import object_states from igibson.objects.articulated_object import URDFObject -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings from igibson.scenes.empty_scene import EmptyScene from igibson.simulator import Simulator from igibson.utils.assets_utils import download_assets @@ -15,22 +13,10 @@ def main(): - """ - Demo to use the raycasting-based sampler to load objects onTop and/or inside another - Loads a cabinet, a microwave open on top of it, and two plates with apples on top, one inside and one on top of the cabinet - Then loads a shelf and YCB cracker boxes inside of it - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - settings = 
MeshRendererSettings(enable_shadow=True, msaa=False, optimized=False) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) + s = Simulator(mode="gui") + scene = EmptyScene() s.import_scene(scene) - # Set a better viewing direction - s.viewer.initial_pos = [-0.2, -1.3, 2.7] - s.viewer.initial_view_direction = [0.3, 0.8, -0.5] - s.viewer.reset_viewer() - try: sample_microwave_plates_apples(s) sample_boxes_on_shelf(s) @@ -53,27 +39,22 @@ def sample_microwave_plates_apples(simulator): apple_dir = os.path.join(igibson.ig_dataset_path, "objects/apple/00_0/") apple_filename = os.path.join(apple_dir, "00_0.urdf") - # Load cabinet, set position manually, and step 100 times - logging.info("Loading cabinet") cabinet = URDFObject(filename=cabinet_filename, category="cabinet", scale=np.array([2.0, 2.0, 2.0])) simulator.import_object(cabinet) cabinet.set_position([0, 0, 0.5]) for _ in range(100): simulator.step() - # Load microwave, set position on top of the cabinet, open it, and step 100 times - logging.info("Loading microwave Open and OnTop of the cabinet") microwave = URDFObject( filename=microwave_filename, category="microwave", model_path=microwave_dir, scale=np.array([0.5, 0.5, 0.5]) ) simulator.import_object(microwave) assert microwave.states[object_states.OnTop].set_value(cabinet, True, use_ray_casting_method=True) - assert microwave.states[object_states.Open].set_value(True) - logging.info("Microwave loaded and placed") + microwave.states[object_states.Open].set_value(True) for _ in range(100): simulator.step() + print("Microwave placed") - logging.info("Loading plates") for i in range(3): plate = URDFObject( filename=plate_filename, category="plate", model_path=plate_dir, bounding_box=[0.25, 0.25, 0.05] @@ -82,22 +63,20 @@ def sample_microwave_plates_apples(simulator): # Put the 1st plate in the microwave if i == 0: - logging.info("Loading plate Inside the microwave") assert plate.states[object_states.Inside].set_value(microwave, True, use_ray_casting_method=True) else: - logging.info("Loading plate OnTop the microwave") assert plate.states[object_states.OnTop].set_value(microwave, True, use_ray_casting_method=True) - logging.info("Plate %d loaded and placed." % i) + print("Plate %d placed." % i) + for _ in range(100): simulator.step() - logging.info("Loading three apples OnTop of the plate") for j in range(3): apple = URDFObject(filename=apple_filename, category="apple", model_path=apple_dir) simulator.import_object(apple) assert apple.states[object_states.OnTop].set_value(plate, True, use_ray_casting_method=True) - logging.info("Apple %d loaded and placed." 
% j) + for _ in range(100): simulator.step() diff --git a/igibson/examples/demo/robot_example.py b/igibson/examples/demo/robot_example.py new file mode 100644 index 000000000..460ee2676 --- /dev/null +++ b/igibson/examples/demo/robot_example.py @@ -0,0 +1,64 @@ +import os +import time + +import numpy as np +import pybullet as p +import pybullet_data + +import igibson +from igibson.robots.fetch_robot import Fetch +from igibson.robots.jr2_kinova_robot import JR2_Kinova +from igibson.robots.locobot_robot import Locobot +from igibson.robots.turtlebot_robot import Turtlebot +from igibson.utils.utils import parse_config + + +def main(): + p.connect(p.GUI) + p.setGravity(0, 0, -9.8) + p.setTimeStep(1.0 / 240.0) + + floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml") + p.loadMJCF(floor) + + robots = [] + config = parse_config(os.path.join(igibson.example_config_path, "fetch_reaching.yaml")) + fetch = Fetch(config) + robots.append(fetch) + + config = parse_config(os.path.join(igibson.example_config_path, "jr_reaching.yaml")) + jr = JR2_Kinova(config) + robots.append(jr) + + config = parse_config(os.path.join(igibson.example_config_path, "locobot_point_nav.yaml")) + locobot = Locobot(config) + robots.append(locobot) + + config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_point_nav.yaml")) + turtlebot = Turtlebot(config) + robots.append(turtlebot) + + positions = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]] + + for robot, position in zip(robots, positions): + robot.load() + robot.set_position(position) + robot.robot_specific_reset() + robot.keep_still() + + for _ in range(2400): # keep still for 10 seconds + p.stepSimulation() + time.sleep(1.0 / 240.0) + + for _ in range(2400): # move with small random actions for 10 seconds + for robot, position in zip(robots, positions): + action = np.random.uniform(-1, 1, robot.action_dim) + robot.apply_action(action) + p.stepSimulation() + time.sleep(1.0 / 240.0) + + p.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/scene_example.py b/igibson/examples/demo/scene_example.py new file mode 100644 index 000000000..9a642eb3f --- /dev/null +++ b/igibson/examples/demo/scene_example.py @@ -0,0 +1,36 @@ +import time + +import numpy as np +import pybullet as p + +from igibson.scenes.gibson_indoor_scene import StaticIndoorScene + + +def main(): + p.connect(p.GUI) + p.setGravity(0, 0, -9.8) + p.setTimeStep(1.0 / 240.0) + + scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=True) + scene.load() + + np.random.seed(0) + for _ in range(10): + random_floor = scene.get_random_floor() + p1 = scene.get_random_point(random_floor)[1] + p2 = scene.get_random_point(random_floor)[1] + shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True) + print("random point 1:", p1) + print("random point 2:", p2) + print("geodesic distance between p1 and p2", geodesic_distance) + print("shortest path from p1 to p2:", shortest_path) + + for _ in range(24000): # at least 100 seconds + p.stepSimulation() + time.sleep(1.0 / 240.0) + + p.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/scene_interactive_example.py b/igibson/examples/demo/scene_interactive_example.py new file mode 100644 index 000000000..d21032db4 --- /dev/null +++ b/igibson/examples/demo/scene_interactive_example.py @@ -0,0 +1,23 @@ +import numpy as np + +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from 
igibson.simulator import Simulator + + +def main(): + s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0) + scene = InteractiveIndoorScene("Rs_int", texture_randomization=False, object_randomization=False) + s.import_ig_scene(scene) + + np.random.seed(0) + for _ in range(10): + pt = scene.get_random_point_by_room_type("living_room")[1] + print("random point in living_room", pt) + + for _ in range(1000): + s.step() + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/scene_interactive_example_highlight.py b/igibson/examples/demo/scene_interactive_example_highlight.py new file mode 100644 index 000000000..77b630206 --- /dev/null +++ b/igibson/examples/demo/scene_interactive_example_highlight.py @@ -0,0 +1,32 @@ +import numpy as np + +from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator + + +def main(): + settings = MeshRendererSettings(optimized=True) + s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0, rendering_settings=settings) + scene = InteractiveIndoorScene("Rs_int", texture_randomization=False, object_randomization=False) + s.import_ig_scene(scene) + np.random.seed(0) + for _ in range(10): + pt = scene.get_random_point_by_room_type("living_room")[1] + print("random point in living_room", pt) + + for i in range(1000): + s.step() + if i % 100 == 0: + for obj in scene.objects_by_category["window"]: + obj.highlight() + + if i % 100 == 50: + for obj in scene.objects_by_category["window"]: + obj.unhighlight() + + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/scene_interactive_object_rand_example.py b/igibson/examples/demo/scene_interactive_object_rand_example.py new file mode 100644 index 000000000..e4e0188b0 --- /dev/null +++ b/igibson/examples/demo/scene_interactive_object_rand_example.py @@ -0,0 +1,21 @@ +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator + + +def main(): + s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0) + + for random_seed in range(10): + scene = InteractiveIndoorScene( + "Rs_int", texture_randomization=False, object_randomization=True, object_randomization_idx=random_seed + ) + s.import_ig_scene(scene) + for i in range(1000): + s.step() + s.reload() + + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/scene_interactive_partial_loading_example.py b/igibson/examples/demo/scene_interactive_partial_loading_example.py new file mode 100644 index 000000000..de296b713 --- /dev/null +++ b/igibson/examples/demo/scene_interactive_partial_loading_example.py @@ -0,0 +1,22 @@ +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator + + +def main(): + s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0) + scene = InteractiveIndoorScene( + "Rs_int", + texture_randomization=False, + object_randomization=False, + load_object_categories=["chair"], + load_room_types=["living_room"], + ) + s.import_ig_scene(scene) + + for _ in range(1000): + s.step() + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/scene_interactive_texture_rand_example.py b/igibson/examples/demo/scene_interactive_texture_rand_example.py new file mode 100644 index 000000000..84f51a206 --- /dev/null +++ 
b/igibson/examples/demo/scene_interactive_texture_rand_example.py @@ -0,0 +1,18 @@ +from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene +from igibson.simulator import Simulator + + +def main(): + s = Simulator(mode="gui", image_width=512, image_height=512, device_idx=0) + scene = InteractiveIndoorScene("Rs_int", texture_randomization=True, object_randomization=False) + s.import_ig_scene(scene) + + for i in range(10000): + if i % 1000 == 0: + scene.randomize_texture() + s.step() + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/scene_stadium_example.py b/igibson/examples/demo/scene_stadium_example.py new file mode 100644 index 000000000..3b075e659 --- /dev/null +++ b/igibson/examples/demo/scene_stadium_example.py @@ -0,0 +1,24 @@ +import time + +import pybullet as p + +from igibson.scenes.stadium_scene import StadiumScene + + +def main(): + p.connect(p.GUI) + p.setGravity(0, 0, -9.8) + p.setTimeStep(1.0 / 240.0) + + scene = StadiumScene() + scene.load() + + for _ in range(24000): # at least 100 seconds + p.stepSimulation() + time.sleep(1.0 / 240.0) + + p.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/simulator_example.py b/igibson/examples/demo/simulator_example.py new file mode 100644 index 000000000..95e9dc94e --- /dev/null +++ b/igibson/examples/demo/simulator_example.py @@ -0,0 +1,41 @@ +import os + +import numpy as np + +import igibson +from igibson.objects.ycb_object import YCBObject +from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings +from igibson.render.profiler import Profiler +from igibson.robots.turtlebot_robot import Turtlebot +from igibson.scenes.gibson_indoor_scene import StaticIndoorScene +from igibson.simulator import Simulator +from igibson.utils.utils import parse_config + + +def main(): + config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_demo.yaml")) + settings = MeshRendererSettings(enable_shadow=False, msaa=False) + s = Simulator(mode="gui", image_width=256, image_height=256, rendering_settings=settings) + + scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=True) + s.import_scene(scene) + turtlebot = Turtlebot(config) + s.import_robot(turtlebot) + + for _ in range(10): + obj = YCBObject("003_cracker_box") + s.import_object(obj) + obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1]) + + print(s.renderer.instances) + + for i in range(10000): + with Profiler("Simulator step"): + turtlebot.apply_action([0.1, 0.1]) + s.step() + rgb = s.renderer.render_robot_cameras(modes=("rgb")) + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/sliceable_reset_demo.py b/igibson/examples/demo/sliceable_reset_demo.py new file mode 100644 index 000000000..8b8a3215c --- /dev/null +++ b/igibson/examples/demo/sliceable_reset_demo.py @@ -0,0 +1,84 @@ +import os + +import numpy as np +import pybullet as p + +from igibson import object_states +from igibson.objects.articulated_object import URDFObject +from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer +from igibson.scenes.empty_scene import EmptyScene +from igibson.simulator import Simulator +from igibson.utils.assets_utils import get_ig_model_path + + +def main(): + s = Simulator(mode="gui", image_width=1280, image_height=720) + scene = EmptyScene() + s.import_scene(scene) + + model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf") 
+ desk = URDFObject( + filename=model_path, category="table", name="19898", scale=np.array([0.8, 0.8, 0.8]), abilities={} + ) + s.import_object(desk) + desk.set_position([0, 0, 0.4]) + + model_path = os.path.join(get_ig_model_path("apple", "00_0"), "00_0.urdf") + simulator_obj = URDFObject( + model_path, name="00_0", category="apple", scale=np.array([1.0, 1.0, 1.0]), initial_pos=[100, 100, -100] + ) + + whole_object = simulator_obj + object_parts = [] + + for i, part in enumerate(simulator_obj.metadata["object_parts"]): + category = part["category"] + model = part["model"] + # Scale the offset accordingly + part_pos = part["pos"] * whole_object.scale + part_orn = part["orn"] + model_path = get_ig_model_path(category, model) + filename = os.path.join(model_path, model + ".urdf") + obj_name = whole_object.name + "_part_{}".format(i) + simulator_obj_part = URDFObject( + filename, + name=obj_name, + category=category, + model_path=model_path, + scale=whole_object.scale, + initial_pos=[100 + i, 100, -100], + ) + object_parts.append((simulator_obj_part, (part_pos, part_orn))) + + grouped_obj_parts = ObjectGrouper(object_parts) + apple = ObjectMultiplexer(whole_object.name + "_multiplexer", [whole_object, grouped_obj_parts], 0) + + s.import_object(apple) + apple.set_position([0, 0, 0.72]) + + # Let the apple fall + for _ in range(100): + s.step() + + # Dump the initial state. + state_dump = p.saveState() + dump = apple.dump_state() + print(dump) + + # Slice the apple and set the object parts away + apple.states[object_states.Sliced].set_value(True) + assert isinstance(apple.current_selection(), ObjectGrouper) + for obj_part in apple.objects: + obj_part.set_position([0, 0, 1]) + + p.restoreState(state_dump) + p.removeState(state_dump) + # The apple should become whole again + apple.load_state(dump) + + while True: + s.step() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/demo/stable_baselines3_behavior_example.py b/igibson/examples/demo/stable_baselines3_behavior_example.py new file mode 100644 index 000000000..af3f6c4b7 --- /dev/null +++ b/igibson/examples/demo/stable_baselines3_behavior_example.py @@ -0,0 +1,153 @@ +import os +from typing import Callable + +import igibson +from igibson.envs.behavior_env import BehaviorEnv + +try: + import gym + import torch as th + import torch.nn as nn + from stable_baselines3 import PPO + from stable_baselines3.common.evaluation import evaluate_policy + from stable_baselines3.common.preprocessing import maybe_transpose + from stable_baselines3.common.torch_layers import BaseFeaturesExtractor + from stable_baselines3.common.utils import set_random_seed + from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor + +except ModuleNotFoundError: + print("stable-baselines3 is not installed. You would need to do: pip install stable-baselines3") + exit(1) + + +""" +Example training code using stable-baselines3 PPO for one BEHAVIOR activity. +Note that due to the sparsity of the reward, this training code will not converge and achieve task success. +This only serves as a starting point that users can further build upon. +""" + + +class CustomCombinedExtractor(BaseFeaturesExtractor): + def __init__(self, observation_space: gym.spaces.Dict): + # We do not know features-dim here before going over all the items, + # so put something dummy for now. 
PyTorch requires calling + # nn.Module.__init__ before adding modules + super(CustomCombinedExtractor, self).__init__(observation_space, features_dim=1) + + extractors = {} + + total_concat_size = 0 + feature_size = 128 + for key, subspace in observation_space.spaces.items(): + if key in ["proprioception", "task_obs"]: + extractors[key] = nn.Sequential(nn.Linear(subspace.shape[0], feature_size), nn.ReLU()) + elif key in ["rgb", "highlight", "depth", "seg", "ins_seg"]: + n_input_channels = subspace.shape[2] # channel last + cnn = nn.Sequential( + nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0), + nn.ReLU(), + nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0), + nn.ReLU(), + nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0), + nn.ReLU(), + nn.Flatten(), + ) + test_tensor = th.zeros([subspace.shape[2], subspace.shape[0], subspace.shape[1]]) + with th.no_grad(): + n_flatten = cnn(test_tensor[None]).shape[1] + fc = nn.Sequential(nn.Linear(n_flatten, feature_size), nn.ReLU()) + extractors[key] = nn.Sequential(cnn, fc) + elif key in ["scan"]: + n_input_channels = subspace.shape[1] # channel last + cnn = nn.Sequential( + nn.Conv1d(n_input_channels, 32, kernel_size=8, stride=4, padding=0), + nn.ReLU(), + nn.Conv1d(32, 64, kernel_size=4, stride=2, padding=0), + nn.ReLU(), + nn.Conv1d(64, 64, kernel_size=3, stride=1, padding=0), + nn.ReLU(), + nn.Flatten(), + ) + test_tensor = th.zeros([subspace.shape[1], subspace.shape[0]]) + with th.no_grad(): + n_flatten = cnn(test_tensor[None]).shape[1] + fc = nn.Sequential(nn.Linear(n_flatten, feature_size), nn.ReLU()) + extractors[key] = nn.Sequential(cnn, fc) + else: + raise ValueError("Unknown observation key: %s" % key) + total_concat_size += feature_size + + self.extractors = nn.ModuleDict(extractors) + + # Update the features dim manually + self._features_dim = total_concat_size + + def forward(self, observations) -> th.Tensor: + encoded_tensor_list = [] + + # self.extractors contain nn.Modules that do all the processing. + for key, extractor in self.extractors.items(): + if key in ["rgb", "highlight", "depth", "seg", "ins_seg"]: + observations[key] = observations[key].permute((0, 3, 1, 2)) + elif key in ["scan"]: + observations[key] = observations[key].permute((0, 2, 1)) + encoded_tensor_list.append(extractor(observations[key])) + # Return a (B, self._features_dim) PyTorch tensor, where B is batch dimension. 
+ return th.cat(encoded_tensor_list, dim=1) + + +def main(): + config_file = "behavior_onboard_sensing.yaml" + tensorboard_log_dir = "log_dir" + num_cpu = 8 + + def make_env(rank: int, seed: int = 0) -> Callable: + def _init() -> BehaviorEnv: + env = BehaviorEnv( + config_file=os.path.join(igibson.example_config_path, config_file), + mode="headless", + action_timestep=1 / 30.0, + physics_timestep=1 / 300.0, + ) + env.seed(seed + rank) + return env + + set_random_seed(seed) + return _init + + env = SubprocVecEnv([make_env(i) for i in range(num_cpu)]) + env = VecMonitor(env) + + eval_env = BehaviorEnv( + config_file=os.path.join(igibson.example_config_path, config_file), + mode="headless", + action_timestep=1 / 30.0, + physics_timestep=1 / 300.0, + ) + + policy_kwargs = dict( + features_extractor_class=CustomCombinedExtractor, + ) + os.makedirs(tensorboard_log_dir, exist_ok=True) + model = PPO("MultiInputPolicy", env, verbose=1, tensorboard_log=tensorboard_log_dir, policy_kwargs=policy_kwargs) + print(model.policy) + + # Random Agent, before training + mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10) + print(f"Before Training: Mean reward: {mean_reward} +/- {std_reward:.2f}") + + model.learn(1000000) + + mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=20) + print(f"After Training: Mean reward: {mean_reward} +/- {std_reward:.2f}") + + model.save("ckpt") + del model + + model = PPO.load("ckpt") + mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=20) + print(f"After Loading: Mean reward: {mean_reward} +/- {std_reward:.2f}") + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/learning/stable_baselines3_example.py b/igibson/examples/demo/stable_baselines3_example.py similarity index 86% rename from igibson/examples/learning/stable_baselines3_example.py rename to igibson/examples/demo/stable_baselines3_example.py index d6b9bf71a..263dc0f25 100644 --- a/igibson/examples/learning/stable_baselines3_example.py +++ b/igibson/examples/demo/stable_baselines3_example.py @@ -1,4 +1,3 @@ -import logging import os from typing import Callable @@ -96,17 +95,10 @@ def forward(self, observations) -> th.Tensor: def main(): - """ - Example to set a training process with Stable Baselines 3 - Loads a scene and starts the training process for a navigation task with images using PPO - Saves the checkpoint and loads it again - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - config_file = "turtlebot_nav.yaml" + config_file = "turtlebot_point_nav.yaml" tensorboard_log_dir = "log_dir" - num_environments = 8 + num_cpu = 8 - # Function callback to create environments def make_env(rank: int, seed: int = 0) -> Callable: def _init() -> iGibsonEnv: env = iGibsonEnv( @@ -121,11 +113,9 @@ def _init() -> iGibsonEnv: set_random_seed(seed) return _init - # Multiprocess - env = SubprocVecEnv([make_env(i) for i in range(num_environments)]) + env = SubprocVecEnv([make_env(i) for i in range(num_cpu)]) env = VecMonitor(env) - # Create a new environment for evaluation eval_env = iGibsonEnv( config_file=os.path.join(igibson.example_config_path, config_file), mode="headless", @@ -133,7 +123,6 @@ def _init() -> iGibsonEnv: physics_timestep=1 / 120.0, ) - # Obtain the arguments/parameters for the policy and create the PPO model policy_kwargs = dict( features_extractor_class=CustomCombinedExtractor, ) @@ -141,25 +130,19 @@ def _init() -> iGibsonEnv: model = PPO("MultiInputPolicy", env, verbose=1, 
tensorboard_log=tensorboard_log_dir, policy_kwargs=policy_kwargs) print(model.policy) - # Random Agent, evaluation before training + # Random Agent, before training mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10) print(f"Before Training: Mean reward: {mean_reward} +/- {std_reward:.2f}") - # Train the model for the given number of steps model.learn(1000000) - # Evaluate the policy after training mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=20) print(f"After Training: Mean reward: {mean_reward} +/- {std_reward:.2f}") - # Save the trained model and delete it model.save("ckpt") del model - # Reload the trained model from file model = PPO.load("ckpt") - - # Evaluate the trained model loaded from file mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=20) print(f"After Loading: Mean reward: {mean_reward} +/- {std_reward:.2f}") diff --git a/igibson/examples/object_states/temperature_demo.py b/igibson/examples/demo/temperature_example.py similarity index 66% rename from igibson/examples/object_states/temperature_demo.py rename to igibson/examples/demo/temperature_example.py index 472e99f8a..1d3424460 100644 --- a/igibson/examples/object_states/temperature_demo.py +++ b/igibson/examples/demo/temperature_example.py @@ -1,4 +1,3 @@ -import logging import os import numpy as np @@ -8,27 +7,18 @@ from igibson.objects.articulated_object import URDFObject from igibson.scenes.empty_scene import EmptyScene from igibson.simulator import Simulator +from igibson.utils.assets_utils import download_assets + +download_assets() def main(): - """ - Demo of temperature change - Loads a stove, a microwave and an oven, all toggled on, and five frozen apples - The user can move the apples to see them change from frozen, to normal temperature, to cooked and burnt - This demo also shows how to load objects ToggledOn and how to set the initial temperature of an object - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - s = Simulator(mode="gui_interactive", image_width=1280, image_height=720) - # Set a better viewing direction - s.viewer.initial_pos = [0.0, -0.7, 2.1] - s.viewer.initial_view_direction = [0.0, 0.8, -0.7] - s.viewer.reset_viewer() + s = Simulator(mode="iggui", image_width=1280, image_height=720) try: - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) + scene = EmptyScene() s.import_scene(scene) - # Load stove ON stove_dir = os.path.join(igibson.ig_dataset_path, "objects/stove/101930/") stove_urdf = os.path.join(stove_dir, "101930.urdf") stove = URDFObject(stove_urdf, name="stove", category="stove", model_path=stove_dir) @@ -36,7 +26,6 @@ def main(): stove.set_position([0, 0, 0.782]) stove.states[object_states.ToggledOn].set_value(True) - # Load microwave ON microwave_dir = os.path.join(igibson.ig_dataset_path, "objects/microwave/7128/") microwave_urdf = os.path.join(microwave_dir, "7128.urdf") microwave = URDFObject(microwave_urdf, name="microwave", category="microwave", model_path=microwave_dir) @@ -44,7 +33,6 @@ def main(): microwave.set_position([2, 0, 0.401]) microwave.states[object_states.ToggledOn].set_value(True) - # Load oven ON oven_dir = os.path.join(igibson.ig_dataset_path, "objects/oven/7120/") oven_urdf = os.path.join(oven_dir, "7120.urdf") oven = URDFObject(oven_urdf, name="oven", category="oven", model_path=oven_dir) @@ -52,14 +40,12 @@ def main(): oven.set_position([-2, 0, 0.816]) oven.states[object_states.ToggledOn].set_value(True) - # Load tray 
tray_dir = os.path.join(igibson.ig_dataset_path, "objects/tray/tray_000/") tray_urdf = os.path.join(tray_dir, "tray_000.urdf") tray = URDFObject(tray_urdf, name="tray", category="tray", model_path=tray_dir, scale=np.array([0.1, 0.1, 0.1])) s.import_object(tray) tray.set_position([0, 0, 1.55]) - # Load fridge fridge_dir = os.path.join(igibson.ig_dataset_path, "objects/fridge/12252/") fridge_urdf = os.path.join(fridge_dir, "12252.urdf") fridge = URDFObject( @@ -77,34 +63,32 @@ def main(): s.import_object(fridge) fridge.set_position_orientation([-1, -3, 0.75], [1, 0, 0, 0]) - # Load 5 apples apple_dir = os.path.join(igibson.ig_dataset_path, "objects/apple/00_0/") apple_urdf = os.path.join(apple_dir, "00_0.urdf") + apples = [] for i in range(5): apple = URDFObject(apple_urdf, name="apple", category="apple", model_path=apple_dir) s.import_object(apple) apple.set_position([0, i * 0.05, 1.65]) apples.append(apple) + s.step() - # Set initial temperature of the apples to -50 degrees Celsius for apple in apples: apple.states[object_states.Temperature].set_value(-50) + # Run simulation for 1000 steps while True: s.step() - for idx, apple in enumerate(apples): - logging.info( - "Apple %d Temperature: %.2f. Frozen: %r. Cooked: %r. Burnt: %r." - % ( - idx + 1, - apple.states[object_states.Temperature].get_value(), - apple.states[object_states.Frozen].get_value(), - apple.states[object_states.Cooked].get_value(), - apple.states[object_states.Burnt].get_value(), - ) + print( + "Apple Temperature: %.2f. Frozen: %r. Cooked: %r. Burnt: %r." + % ( + apple.states[object_states.Temperature].get_value(), + apple.states[object_states.Frozen].get_value(), + apple.states[object_states.Cooked].get_value(), + apple.states[object_states.Burnt].get_value(), ) - print() + ) finally: s.disconnect() diff --git a/igibson/examples/demo/texture_change_example.py b/igibson/examples/demo/texture_change_example.py new file mode 100644 index 000000000..0d11250e1 --- /dev/null +++ b/igibson/examples/demo/texture_change_example.py @@ -0,0 +1,42 @@ +import os + +import igibson +from igibson import object_states +from igibson.objects.articulated_object import URDFObject +from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings +from igibson.render.profiler import Profiler +from igibson.scenes.empty_scene import EmptyScene +from igibson.simulator import Simulator +from igibson.utils.assets_utils import get_ig_model_path +from igibson.utils.utils import parse_config + + +def main(): + config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_demo.yaml")) + settings = MeshRendererSettings(enable_shadow=False, msaa=False, optimized=True) + s = Simulator(mode="gui", image_width=512, image_height=512, rendering_settings=settings) + + scene = EmptyScene() + s.import_scene(scene) + + apple_dir = get_ig_model_path("apple", "00_0") + apple_urdf = os.path.join(get_ig_model_path("apple", "00_0"), "00_0.urdf") + + apple = URDFObject(apple_urdf, name="apple", category="apple", model_path=apple_dir) + + s.import_object(apple) + apple.set_position([0, 0, 0.2]) + temp = 0 + for i in range(10000): + if i % 10 == 0: + temp += 1 + print(temp) + apple.states[object_states.Temperature].set_value(temp) + print(apple.states[object_states.Cooked].get_value()) + with Profiler("Simulator step"): + s.step() + s.disconnect() + + +if __name__ == "__main__": + main() diff --git a/igibson/examples/robots/trav_map_vis_example.py b/igibson/examples/demo/trav_map_vis_example.py similarity index 100% rename from 
igibson/examples/robots/trav_map_vis_example.py rename to igibson/examples/demo/trav_map_vis_example.py diff --git a/igibson/examples/vr/data_save_replay/vr_sr.py b/igibson/examples/demo/vr_demos/data_save_replay/vr_sr.py similarity index 97% rename from igibson/examples/vr/data_save_replay/vr_sr.py rename to igibson/examples/demo/vr_demos/data_save_replay/vr_sr.py index 6d0c5524e..b1fd68d43 100644 --- a/igibson/examples/vr/data_save_replay/vr_sr.py +++ b/igibson/examples/demo/vr_demos/data_save_replay/vr_sr.py @@ -75,7 +75,7 @@ def run_action_sr(mode): scene = InteractiveIndoorScene( "Rs_int", load_object_categories=["walls", "floors", "ceilings"], load_room_types=["kitchen"] ) - s.import_scene(scene) + s.import_ig_scene(scene) p.setAdditionalSearchPath(pybullet_data.getDataPath()) # Note: uncomment these lines during replay to see the scene from an external perspective @@ -88,7 +88,7 @@ def run_action_sr(mode): # Data replay uses constraints during both save and replay modes # Note: set show_visual_head to True upon replay to see the VR head bvr_robot = BehaviorRobot(s, show_visual_head=False) - s.import_robot(bvr_robot) + s.import_behavior_robot(bvr_robot) s.register_main_vr_robot(bvr_robot) bvr_robot.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1]) @@ -111,8 +111,8 @@ def run_action_sr(mode): fpath = item[0] pos = item[1] orn = item[2] - item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(item_ob) + item_ob = ArticulatedObject(fpath, scale=1) + s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False) item_ob.set_position(pos) item_ob.set_orientation(orn) @@ -185,7 +185,7 @@ def run_action_sr(mode): # Get relevant VR action data and update VR agent bvr_robot.update(log_reader.get_agent_action("vr_robot")) - # vr_agent.links['eye'].show_eye() + # vr_agent.parts['eye'].show_eye() # Print debugging information if PRINT_PB: diff --git a/igibson/examples/vr/in_development/vr_body_tracker_test.py b/igibson/examples/demo/vr_demos/in_development/vr_body_tracker_test.py similarity index 92% rename from igibson/examples/vr/in_development/vr_body_tracker_test.py rename to igibson/examples/demo/vr_demos/in_development/vr_body_tracker_test.py index ba99444ab..fb7abb0bb 100644 --- a/igibson/examples/vr/in_development/vr_body_tracker_test.py +++ b/igibson/examples/demo/vr_demos/in_development/vr_body_tracker_test.py @@ -10,7 +10,7 @@ def main(): s = Simulator(mode="vr", rendering_settings=MeshRendererSettings(enable_shadow=True, optimized=True)) scene = EmptyScene() - s.import_scene(scene) + s.import_scene(scene, render_floor_plane=True) vr_agent = BehaviorRobot(s) # Main simulation loop diff --git a/igibson/examples/vr/in_development/vr_button_mapping.py b/igibson/examples/demo/vr_demos/in_development/vr_button_mapping.py similarity index 95% rename from igibson/examples/vr/in_development/vr_button_mapping.py rename to igibson/examples/demo/vr_demos/in_development/vr_button_mapping.py index dcf0f74d5..4ce2c73cc 100644 --- a/igibson/examples/vr/in_development/vr_button_mapping.py +++ b/igibson/examples/demo/vr_demos/in_development/vr_button_mapping.py @@ -15,7 +15,7 @@ def main(): s = Simulator(mode="vr", rendering_settings=MeshRendererSettings(enable_shadow=True, optimized=True)) scene = EmptyScene() - s.import_scene(scene) + s.import_scene(scene, render_floor_plane=True) # Main simulation loop while True: diff --git a/igibson/examples/vr/in_development/vr_cleaning_demo.py 
b/igibson/examples/demo/vr_demos/in_development/vr_cleaning_demo.py similarity index 90% rename from igibson/examples/vr/in_development/vr_cleaning_demo.py rename to igibson/examples/demo/vr_demos/in_development/vr_cleaning_demo.py index fe0376ab4..ddd518ac9 100644 --- a/igibson/examples/vr/in_development/vr_cleaning_demo.py +++ b/igibson/examples/demo/vr_demos/in_development/vr_cleaning_demo.py @@ -5,6 +5,7 @@ import igibson from igibson import object_states +from igibson.object_states.factory import prepare_object_states from igibson.objects.ycb_object import YCBObject from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings @@ -44,7 +45,7 @@ if VIEWER_MANIP: s = Simulator( - mode="gui_interactive", + mode="iggui", image_width=512, image_height=512, rendering_settings=vr_rendering_settings, @@ -54,14 +55,16 @@ s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=vr_settings) scene = InteractiveIndoorScene("Rs_int", texture_randomization=False, object_randomization=False) -s.import_scene(scene) +s.import_ig_scene(scene) if not VIEWER_MANIP: vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER, normal_color=False) -block = YCBObject(name="036_wood_block", abilities={"soakable": {}, "cleaning_tool": {}}) +block = YCBObject(name="036_wood_block") s.import_object(block) block.set_position([1, 1, 1.8]) +block.abilities = ["soakable", "cleaning_tool"] +prepare_object_states(block, abilities={"soakable": {}, "cleaning_tool": {}}) # Set everything that can go dirty. dirtyable_objects = set( diff --git a/igibson/examples/vr/in_development/vr_hand_dex_benchmark.py b/igibson/examples/demo/vr_demos/in_development/vr_hand_dex_benchmark.py similarity index 93% rename from igibson/examples/vr/in_development/vr_hand_dex_benchmark.py rename to igibson/examples/demo/vr_demos/in_development/vr_hand_dex_benchmark.py index 154024890..cc0b3ee73 100644 --- a/igibson/examples/vr/in_development/vr_hand_dex_benchmark.py +++ b/igibson/examples/demo/vr_demos/in_development/vr_hand_dex_benchmark.py @@ -68,7 +68,7 @@ def main(): ) # scene.load_object_categories(benchmark_names) - s.import_scene(scene) + s.import_ig_scene(scene) p.setAdditionalSearchPath(pybullet_data.getDataPath()) vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER) @@ -144,13 +144,11 @@ def main(): fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name] for i in range(len(masses)): if obj_type == "ycb": - handle = YCBObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) + handle = YCBObject(fpath, scale=scale) elif obj_type == "pb": - handle = ArticulatedObject( - fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False} - ) + handle = ArticulatedObject(fpath, scale=scale) - s.import_object(handle) + s.import_object(handle, use_pbr=False, use_pbr_mapping=False) # Calculate new position along spacing vector new_pos = ( pos[0] + space_vec[0] * space_val * i, @@ -159,8 +157,8 @@ def main(): ) handle.set_position(new_pos) handle.set_orientation(orn) - p.changeDynamics(handle.get_body_id(), -1, mass=masses[i]) - minBox, maxBox = p.getAABB(handle.get_body_id()) + p.changeDynamics(handle.body_id, -1, mass=masses[i]) + minBox, maxBox = p.getAABB(handle.body_id) dims = [maxBox[i] - minBox[i] for i in range(3)] print("Name {} and masses: {}".format(name, masses)) print("XYZ dimensions: {}".format(dims)) @@ -286,14 +284,14 @@ def main(): orn = item["orn"] scale = 
item["scale"] mass = item["mass"] - item_ob = ArticulatedObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(item_ob) + item_ob = ArticulatedObject(fpath, scale=scale) + s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False) item_ob.set_position(pos) item_ob.set_orientation(orn) objs_loaded.append(item_ob) - minBox, maxBox = p.getAABB(item_ob.get_body_id()) + minBox, maxBox = p.getAABB(item_ob.body_id) dims = [maxBox[i] - minBox[i] for i in range(3)] - p.changeDynamics(item_ob.get_body_id(), -1, mass=mass) + p.changeDynamics(item_ob.body_id, -1, mass=mass) print("Name {} and mass: {}".format(it_name, mass)) print("XYZ dimensions: {}".format(dims)) @@ -317,7 +315,7 @@ def main(): vr_agent.apply_action() if show_overlay: - ag_candidate_data = vr_agent.links["right_hand"].candidate_data + ag_candidate_data = vr_agent.parts["right_hand"].candidate_data if ag_candidate_data: t = "" for bid, link, dist in ag_candidate_data: diff --git a/igibson/examples/vr/in_development/vr_hand_speed_benchmark.py b/igibson/examples/demo/vr_demos/in_development/vr_hand_speed_benchmark.py similarity index 96% rename from igibson/examples/vr/in_development/vr_hand_speed_benchmark.py rename to igibson/examples/demo/vr_demos/in_development/vr_hand_speed_benchmark.py index e78ed0b05..b08de0f98 100644 --- a/igibson/examples/vr/in_development/vr_hand_speed_benchmark.py +++ b/igibson/examples/demo/vr_demos/in_development/vr_hand_speed_benchmark.py @@ -55,7 +55,7 @@ def main(): if VIEWER_MANIP: s = Simulator( - mode="gui_interactive", + mode="iggui", image_width=512, image_height=512, rendering_settings=vr_rendering_settings, @@ -65,7 +65,7 @@ def main(): scene = InteractiveIndoorScene("Rs_int") scene._set_first_n_objects(2) - s.import_scene(scene) + s.import_ig_scene(scene) p.setAdditionalSearchPath(pybullet_data.getDataPath()) if not VIEWER_MANIP: @@ -89,8 +89,8 @@ def main(): fpath = item[0] pos = item[1] orn = item[2] - item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(item_ob) + item_ob = ArticulatedObject(fpath, scale=1) + s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False) item_ob.set_position(pos) item_ob.set_orientation(orn) diff --git a/igibson/examples/vr/in_development/vr_playground.py b/igibson/examples/demo/vr_demos/in_development/vr_playground.py similarity index 93% rename from igibson/examples/vr/in_development/vr_playground.py rename to igibson/examples/demo/vr_demos/in_development/vr_playground.py index ed4a3734d..e292ceb93 100644 --- a/igibson/examples/vr/in_development/vr_playground.py +++ b/igibson/examples/demo/vr_demos/in_development/vr_playground.py @@ -65,7 +65,7 @@ def main(): # Turn this on when debugging to speed up loading if LOAD_PARTIAL: scene._set_first_n_objects(10) - s.import_scene(scene) + s.import_ig_scene(scene) if not VR_MODE: camera_pose = np.array([0, -3, 1.2]) @@ -84,10 +84,10 @@ def main(): mass_list = [5, 10, 100, 500] mustard_start = [-1, 1.55, 1.2] for i in range(len(mass_list)): - mustard = YCBObject("006_mustard_bottle", renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(mustard) + mustard = YCBObject("006_mustard_bottle") + s.import_object(mustard, use_pbr=False, use_pbr_mapping=False, shadow_caster=True) mustard.set_position([mustard_start[0] + i * 0.2, mustard_start[1], mustard_start[2]]) - p.changeDynamics(mustard.get_body_id(), -1, mass=mass_list[i]) + p.changeDynamics(mustard.body_id, -1, 
mass=mass_list[i]) # Main simulation loop while True: diff --git a/igibson/examples/vr/in_development/vr_sample_hud.py b/igibson/examples/demo/vr_demos/in_development/vr_sample_hud.py similarity index 94% rename from igibson/examples/vr/in_development/vr_sample_hud.py rename to igibson/examples/demo/vr_demos/in_development/vr_sample_hud.py index cb0f35585..848e7401a 100644 --- a/igibson/examples/vr/in_development/vr_sample_hud.py +++ b/igibson/examples/demo/vr_demos/in_development/vr_sample_hud.py @@ -70,7 +70,7 @@ def main(): s = Simulator(mode="vr", rendering_settings=vr_rendering_settings) scene = InteractiveIndoorScene("Rs_int") scene._set_obj_names_to_load(benchmark_names) - s.import_scene(scene) + s.import_ig_scene(scene) p.setAdditionalSearchPath(pybullet_data.getDataPath()) vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER) @@ -146,13 +146,11 @@ def main(): fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name] for i in range(len(masses)): if obj_type == "ycb": - handle = YCBObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) + handle = YCBObject(fpath, scale=scale) elif obj_type == "pb": - handle = ArticulatedObject( - fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False} - ) + handle = ArticulatedObject(fpath, scale=scale) - s.import_object(handle) + s.import_object(handle, use_pbr=False, use_pbr_mapping=False) # Calculate new position along spacing vector new_pos = ( pos[0] + space_vec[0] * space_val * i, @@ -161,7 +159,7 @@ def main(): ) handle.set_position(new_pos) handle.set_orientation(orn) - p.changeDynamics(handle.get_body_id(), -1, mass=masses[i]) + p.changeDynamics(handle.body_id, -1, mass=masses[i]) title = s.add_vr_overlay_text( text_data="Welcome to iGibson VR!", font_size=85, font_style="Bold", color=[0, 0, 0.5], pos=[150, 900] diff --git a/igibson/examples/vr/in_development/vr_tracker_test.py b/igibson/examples/demo/vr_demos/in_development/vr_tracker_test.py similarity index 94% rename from igibson/examples/vr/in_development/vr_tracker_test.py rename to igibson/examples/demo/vr_demos/in_development/vr_tracker_test.py index 8b318b1ee..892d1d507 100644 --- a/igibson/examples/vr/in_development/vr_tracker_test.py +++ b/igibson/examples/demo/vr_demos/in_development/vr_tracker_test.py @@ -14,7 +14,7 @@ def main(): s = Simulator(mode="vr", rendering_settings=MeshRendererSettings(enable_shadow=True, optimized=True)) scene = EmptyScene() - s.import_scene(scene) + s.import_scene(scene, render_floor_plane=True) # Main simulation loop while True: diff --git a/igibson/examples/vr/muvr/muvr_demo.py b/igibson/examples/demo/vr_demos/muvr/muvr_demo.py similarity index 95% rename from igibson/examples/vr/muvr/muvr_demo.py rename to igibson/examples/demo/vr_demos/muvr/muvr_demo.py index 10096fe67..0cb66eb5e 100644 --- a/igibson/examples/vr/muvr/muvr_demo.py +++ b/igibson/examples/demo/vr_demos/muvr/muvr_demo.py @@ -68,7 +68,7 @@ def run_muvr(mode="server", host="localhost", port="8885"): scene = InteractiveIndoorScene("Rs_int") if LOAD_PARTIAL: scene._set_first_n_objects(10) - s.import_scene(scene) + s.import_ig_scene(scene) # Default camera for non-VR MUVR users if not vr_settings.use_vr: @@ -85,10 +85,10 @@ def run_muvr(mode="server", host="localhost", port="8885"): mass_list = [5, 10, 20, 30] mustard_start = [-1, 1.55, 1.2] for i in range(len(mass_list)): - mustard = YCBObject("006_mustard_bottle", renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(mustard) + mustard = 
YCBObject("006_mustard_bottle") + s.import_object(mustard, use_pbr=False, use_pbr_mapping=False, shadow_caster=True) mustard.set_position([mustard_start[0] + i * 0.2, mustard_start[1], mustard_start[2]]) - p.changeDynamics(mustard.get_body_id(), -1, mass=mass_list[i]) + p.changeDynamics(mustard.body_id, -1, mass=mass_list[i]) # Start the two agents at different points so they don't collide upon entering the scene if vr_settings.use_vr: @@ -102,7 +102,7 @@ def run_muvr(mode="server", host="localhost", port="8885"): vr_client = IGVRClient(host, port) vr_client.register_data(s, client_agent) # Disconnect pybullet since the client only renders - s.disconnect(release_renderer=False) + s.disconnect_pybullet() # Main networking loop while True: diff --git a/igibson/examples/vr/robot_embodiment/vr_demo_robot_explore.py b/igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_explore.py similarity index 87% rename from igibson/examples/vr/robot_embodiment/vr_demo_robot_explore.py rename to igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_explore.py index 074b38a76..268a2d778 100644 --- a/igibson/examples/vr/robot_embodiment/vr_demo_robot_explore.py +++ b/igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_explore.py @@ -9,6 +9,7 @@ from igibson.objects.ycb_object import YCBObject from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings +from igibson.robots.fetch_vr_robot import FetchVR from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene from igibson.simulator import Simulator @@ -46,11 +47,7 @@ s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=VrSettings(vr_fps=VR_FPS)) else: s = Simulator( - mode="gui_interactive", - image_width=960, - image_height=720, - device_idx=0, - rendering_settings=vr_rendering_settings, + mode="iggui", image_width=960, image_height=720, device_idx=0, rendering_settings=vr_rendering_settings ) s.viewer.min_cam_z = 1.0 @@ -58,7 +55,7 @@ # Turn this on when debugging to speed up loading if LOAD_PARTIAL: scene._set_first_n_objects(10) -s.import_scene(scene) +s.import_ig_scene(scene) if not VR_MODE: camera_pose = np.array([0, -3, 1.2]) @@ -73,10 +70,10 @@ mass_list = [5, 10, 100, 500] mustard_start = [-1, 1.55, 1.2] for i in range(len(mass_list)): - mustard = YCBObject("006_mustard_bottle", renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(mustard) + mustard = YCBObject("006_mustard_bottle") + s.import_object(mustard, use_pbr=False, use_pbr_mapping=False, shadow_caster=True) mustard.set_position([mustard_start[0] + i * 0.2, mustard_start[1], mustard_start[2]]) - p.changeDynamics(mustard.get_body_id(), -1, mass=mass_list[i]) + p.changeDynamics(mustard.body_id, -1, mass=mass_list[i]) # Main simulation loop while True: diff --git a/igibson/examples/vr/robot_embodiment/vr_demo_robot_grasping.py b/igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_grasping.py similarity index 92% rename from igibson/examples/vr/robot_embodiment/vr_demo_robot_grasping.py rename to igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_grasping.py index bf41a2156..8e55e5006 100644 --- a/igibson/examples/vr/robot_embodiment/vr_demo_robot_grasping.py +++ b/igibson/examples/demo/vr_demos/robot_embodiment/vr_demo_robot_grasping.py @@ -11,6 +11,7 @@ from igibson.objects.ycb_object import YCBObject from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from 
igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings +from igibson.robots.fetch_vr_robot import FetchVR from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene from igibson.simulator import Simulator @@ -46,17 +47,13 @@ s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=VrSettings(vr_fps=VR_FPS)) else: s = Simulator( - mode="gui_interactive", - image_width=960, - image_height=720, - device_idx=0, - rendering_settings=vr_rendering_settings, + mode="iggui", image_width=960, image_height=720, device_idx=0, rendering_settings=vr_rendering_settings ) s.viewer.min_cam_z = 1.0 scene = InteractiveIndoorScene("Rs_int") scene._set_first_n_objects(2) -s.import_scene(scene) +s.import_ig_scene(scene) p.setAdditionalSearchPath(pybullet_data.getDataPath()) if not VR_MODE: @@ -86,8 +83,8 @@ fpath = item[0] pos = item[1] orn = item[2] - item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(item_ob) + item_ob = ArticulatedObject(fpath, scale=1) + s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False) item_ob.set_position(pos) item_ob.set_orientation(orn) diff --git a/igibson/examples/vr/test/muvr_lag_test.py b/igibson/examples/demo/vr_demos/test/muvr_lag_test.py similarity index 100% rename from igibson/examples/vr/test/muvr_lag_test.py rename to igibson/examples/demo/vr_demos/test/muvr_lag_test.py diff --git a/igibson/examples/vr/test/vr_condition_switch.py b/igibson/examples/demo/vr_demos/test/vr_condition_switch.py similarity index 92% rename from igibson/examples/vr/test/vr_condition_switch.py rename to igibson/examples/demo/vr_demos/test/vr_condition_switch.py index 039f57baf..a861b96ad 100644 --- a/igibson/examples/vr/test/vr_condition_switch.py +++ b/igibson/examples/demo/vr_demos/test/vr_condition_switch.py @@ -41,7 +41,7 @@ def main(): vr_settings = VrSettings() s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=vr_settings) scene = InteractiveIndoorScene("Rs_int") - s.import_scene(scene) + s.import_ig_scene(scene) # Create a BehaviorRobot and it will handle all initialization and importing under-the-hood # Change USE_GRIPPER to switch between the BRHand and the BRGripper (see robots/behavior_robot.py for more details) @@ -57,10 +57,10 @@ def main(): mass_list = [5, 10, 100, 500] mustard_start = [-1, 1.55, 1.2] for i in range(len(mass_list)): - mustard = YCBObject("006_mustard_bottle", renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(mustard) + mustard = YCBObject("006_mustard_bottle") + s.import_object(mustard, use_pbr=False, use_pbr_mapping=False, shadow_caster=True) mustard.set_position([mustard_start[0] + i * 0.2, mustard_start[1], mustard_start[2]]) - p.changeDynamics(mustard.get_body_id(), -1, mass=mass_list[i]) + p.changeDynamics(mustard.body_id, -1, mass=mass_list[i]) # Main simulation loop while True: diff --git a/igibson/examples/vr/test/vr_hand_geom_vis.py b/igibson/examples/demo/vr_demos/test/vr_hand_geom_vis.py similarity index 100% rename from igibson/examples/vr/test/vr_hand_geom_vis.py rename to igibson/examples/demo/vr_demos/test/vr_hand_geom_vis.py diff --git a/igibson/examples/vr/test/vr_overlay_color_test.py b/igibson/examples/demo/vr_demos/test/vr_overlay_color_test.py similarity index 94% rename from igibson/examples/vr/test/vr_overlay_color_test.py rename to igibson/examples/demo/vr_demos/test/vr_overlay_color_test.py index 23ed6723b..3ce2cdb61 100644 --- 
a/igibson/examples/vr/test/vr_overlay_color_test.py +++ b/igibson/examples/demo/vr_demos/test/vr_overlay_color_test.py @@ -81,7 +81,7 @@ def main(): ) # scene.load_object_categories(benchmark_names) - s.import_scene(scene) + s.import_ig_scene(scene) p.setAdditionalSearchPath(pybullet_data.getDataPath()) vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER, normal_color=True) @@ -157,13 +157,11 @@ def main(): fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name] for i in range(len(masses)): if obj_type == "ycb": - handle = YCBObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) + handle = YCBObject(fpath, scale=scale) elif obj_type == "pb": - handle = ArticulatedObject( - fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False} - ) + handle = ArticulatedObject(fpath, scale=scale) - s.import_object(handle) + s.import_object(handle, use_pbr=False, use_pbr_mapping=False) # Calculate new position along spacing vector new_pos = ( pos[0] + space_vec[0] * space_val * i, @@ -172,7 +170,7 @@ def main(): ) handle.set_position(new_pos) handle.set_orientation(orn) - p.changeDynamics(handle.get_body_id(), -1, mass=masses[i]) + p.changeDynamics(handle.body_id, -1, mass=masses[i]) # Text position/size is described in percentage of axes in screen space wrap_scroll_text = s.add_vr_overlay_text( diff --git a/igibson/examples/vr/test/vr_scroll_wrap_text_test.py b/igibson/examples/demo/vr_demos/test/vr_scroll_wrap_text_test.py similarity index 94% rename from igibson/examples/vr/test/vr_scroll_wrap_text_test.py rename to igibson/examples/demo/vr_demos/test/vr_scroll_wrap_text_test.py index ea6b930a8..5d839a8d0 100644 --- a/igibson/examples/vr/test/vr_scroll_wrap_text_test.py +++ b/igibson/examples/demo/vr_demos/test/vr_scroll_wrap_text_test.py @@ -81,7 +81,7 @@ def main(): ) # scene.load_object_categories(benchmark_names) - s.import_scene(scene) + s.import_ig_scene(scene) p.setAdditionalSearchPath(pybullet_data.getDataPath()) vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER, normal_color=True) @@ -157,13 +157,11 @@ def main(): fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name] for i in range(len(masses)): if obj_type == "ycb": - handle = YCBObject(fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) + handle = YCBObject(fpath, scale=scale) elif obj_type == "pb": - handle = ArticulatedObject( - fpath, scale=scale, renderer_params={"use_pbr": False, "use_pbr_mapping": False} - ) + handle = ArticulatedObject(fpath, scale=scale) - s.import_object(handle) + s.import_object(handle, use_pbr=False, use_pbr_mapping=False) # Calculate new position along spacing vector new_pos = ( pos[0] + space_vec[0] * space_val * i, @@ -172,7 +170,7 @@ def main(): ) handle.set_position(new_pos) handle.set_orientation(orn) - p.changeDynamics(handle.get_body_id(), -1, mass=masses[i]) + p.changeDynamics(handle.body_id, -1, mass=masses[i]) # Text position/size is described in percentage of axes in screen space wrap_scroll_text = s.add_vr_overlay_text( diff --git a/igibson/examples/vr/vr_gaze_test.py b/igibson/examples/demo/vr_demos/vr_gaze_test.py similarity index 92% rename from igibson/examples/vr/vr_gaze_test.py rename to igibson/examples/demo/vr_demos/vr_gaze_test.py index 7ca1555ad..6ebcf4789 100644 --- a/igibson/examples/vr/vr_gaze_test.py +++ b/igibson/examples/demo/vr_demos/vr_gaze_test.py @@ -44,7 +44,7 @@ def main(): scene = InteractiveIndoorScene( "Rs_int", load_object_categories=["walls", 
"floors", "ceilings"], load_room_types=["kitchen"] ) - s.import_scene(scene) + s.import_ig_scene(scene) p.setAdditionalSearchPath(pybullet_data.getDataPath()) objects = [ @@ -65,8 +65,8 @@ def main(): fpath = item[0] pos = item[1] orn = item[2] - item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(item_ob) + item_ob = ArticulatedObject(fpath, scale=1) + s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False) item_ob.set_position(pos) item_ob.set_orientation(orn) @@ -84,15 +84,13 @@ def main(): obj.set_position_orientation([1.1, 0.300000, 1.0], [0, 0, 0, 1]) bvr_robot = BehaviorRobot(s) - s.import_robot(bvr_robot) + s.import_behavior_robot(bvr_robot) s.register_main_vr_robot(bvr_robot) bvr_robot.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1]) # Represents gaze - eye_marker = ArticulatedObject( - "sphere_small.urdf", scale=2, renderer_params={"use_pbr": False, "use_pbr_mapping": False} - ) - s.import_object(eye_marker) + eye_marker = ArticulatedObject("sphere_small.urdf", scale=2) + s.import_object(eye_marker, use_pbr=False, use_pbr_mapping=False) gaze_max_dist = 1.5 # Main simulation loop diff --git a/igibson/examples/vr/vr_simple_demo.py b/igibson/examples/demo/vr_demos/vr_simple_demo.py similarity index 95% rename from igibson/examples/vr/vr_simple_demo.py rename to igibson/examples/demo/vr_demos/vr_simple_demo.py index 8db68b37f..cb0071b04 100644 --- a/igibson/examples/vr/vr_simple_demo.py +++ b/igibson/examples/demo/vr_demos/vr_simple_demo.py @@ -43,7 +43,7 @@ def main(): scene = InteractiveIndoorScene( "Rs_int", load_object_categories=["walls", "floors", "ceilings"], load_room_types=["kitchen"] ) - s.import_scene(scene) + s.import_ig_scene(scene) p.setAdditionalSearchPath(pybullet_data.getDataPath()) objects = [ @@ -64,8 +64,8 @@ def main(): fpath = item[0] pos = item[1] orn = item[2] - item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False}) - s.import_object(item_ob) + item_ob = ArticulatedObject(fpath, scale=1) + s.import_object(item_ob, use_pbr=False, use_pbr_mapping=False) item_ob.set_position(pos) item_ob.set_orientation(orn) @@ -83,7 +83,7 @@ def main(): obj.set_position_orientation([1.1, 0.300000, 1.0], [0, 0, 0, 1]) bvr_robot = BehaviorRobot(s) - s.import_robot(bvr_robot) + s.import_behavior_robot(bvr_robot) s.register_main_vr_robot(bvr_robot) bvr_robot.set_position_orientation([0, 0, 1.5], [0, 0, 0, 1]) diff --git a/igibson/examples/web_ui/sampling_feedback_test.py b/igibson/examples/demo/web_ui/sampling_feedback_test.py similarity index 100% rename from igibson/examples/web_ui/sampling_feedback_test.py rename to igibson/examples/demo/web_ui/sampling_feedback_test.py diff --git a/igibson/examples/web_ui/sampling_ui.py b/igibson/examples/demo/web_ui/sampling_ui.py similarity index 77% rename from igibson/examples/web_ui/sampling_ui.py rename to igibson/examples/demo/web_ui/sampling_ui.py index 46c8b14d1..51df1a9f6 100644 --- a/igibson/examples/web_ui/sampling_ui.py +++ b/igibson/examples/demo/web_ui/sampling_ui.py @@ -17,11 +17,11 @@ from flask_cors import CORS import igibson -from igibson.envs.igibson_env import iGibsonEnv +from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings from igibson.scenes.gibson_indoor_scene import StaticIndoorScene from igibson.simulator import Simulator -from igibson.utils.utils import parse_config, restoreState +from 
igibson.utils.utils import parse_config interactive = True NUM_REQUIRED_SUCCESSFUL_SCENES = 3 @@ -124,7 +124,7 @@ def sample(self, behavior_activity, bddl, blocking=True): :param bddl (str): the bddl being sampled in the environment :param blocking (bool): whether to wait for the result - :return (bool, str): (success, feedback) from the sampling process + :return (bool, dict): (success, feedback) from the sampling process """ self.last_active_time = time.time() promise = self.call("sample", behavior_activity, bddl) @@ -213,6 +213,7 @@ def __init__(self): self.s = Simulator(mode="headless", image_width=400, image_height=400, rendering_settings=settings) scene = StaticIndoorScene("Rs") self.s.import_scene(scene) + # self.s.import_ig_scene(scene) def step(self, a): self.s.step() @@ -225,67 +226,116 @@ def close(self): class ToyEnvInt(object): def __init__(self, scene="Rs_int"): - config_file = os.path.join(igibson.example_config_path, "behavior_vr.yaml") - env_config = parse_config(config_file) - env_config["scene_id"] = scene - env_config["task"] = "trivial" - env_config["task_id"] = 0 - env_config["online_sampling"] = True - env_config["load_clutter"] = False + bddl.set_backend("iGibson") + self.task = iGBEHAVIORActivityInstance("trivial", activity_definition=0) + settings = MeshRendererSettings(texture_scale=0.01) - self.env = iGibsonEnv(config_file=env_config, mode="headless", rendering_settings=settings) + simulator = Simulator(mode="headless", image_width=400, image_height=400, rendering_settings=settings) + self.task.initialize_simulator( + simulator=simulator, + scene_id=scene, + mode="headless", + load_clutter=False, + should_debug_sampling=False, + scene_kwargs={}, + online_sampling=True, + ) self.state_id = p.saveState() self.num_body_ids = p.getNumBodies() - self.num_particle_systems = len(self.env.simulator.particle_systems) + self.num_particle_systems = len(self.task.simulator.particle_systems) def step(self, a): pass def restore_scene(self): - for sim_obj in self.env.task.newly_added_objects: - self.env.scene.remove_object(sim_obj) + for sim_obj in self.task.newly_added_objects: + self.task.scene.remove_object(sim_obj) - self.env.simulator.particle_systems = self.env.simulator.particle_systems[: self.num_particle_systems] + self.task.simulator.particle_systems = self.task.simulator.particle_systems[: self.num_particle_systems] for body_id in range(self.num_body_ids, p.getNumBodies()): p.removeBody(body_id) - restoreState(self.state_id) + p.restoreState(self.state_id) def sample(self, behavior_activity, bddl): try: - self.env.task.update_problem(behavior_activity, 0, predefined_problem=bddl) + self.task.update_problem(behavior_activity, "tester", predefined_problem=bddl) + self.task.object_scope["agent.n.01_1"] = self.task.agent.parts["body"] except UncontrolledCategoryError: accept_scene = False - feedback = "Goal state has uncontrolled categories." + feedback = { + "init_success": "no", + "goal_success": "no", + "init_feedback": "Cannot check until goal state is fixed.", + "goal_feedback": "Goal state has uncontrolled categories.", + } return accept_scene, feedback except UnsupportedPredicateError as e: accept_scene = False - feedback = f"We don't yet support the [{e.predicate}] adjective for any objects. We will soon!" + feedback = { + "init_success": "no", + "goal_success": "no", + "init_feedback": f"We don't yet support the [{e.predicate}] adjective for any objects. 
We will soon!", + "goal_feedback": "", + } return accept_scene, feedback except AssertionError as message: - accept_scene = False if message == "No ground goal options": - feedback = ( - "The goal conditions are logically impossible (there is no solution). Check for a contradiction (e.g. asking for the floor to be stained and not stained at the same time).", - ) + accept_scene = False + feedback = { + "init_success": "no", + "goal_success": "no", + "init_feedback": "", + "goal_feedback": "The goal conditions are logically impossible (there is no solution). Check for a contradiction (e.g. asking for the floor to be stained and not stained at the same time).", + } else: - feedback = "Let Sanjana know there was an indeterminate assertion error during problem update." + accept_scene = False + feedback = { + "init_success": "no", + "goal_success": "no", + "init_feedback": f"Let Sanjana know there was an indeterminate assertion error during problem update.", + "goal_feedback": "", + } return accept_scene, feedback try: - accept_scene, feedback = self.env.task.initialize(self.env) + accept_scene, feedback = self.task.check_scene() except AssertionError as message: - accept_scene = False - feedback = f"Let Sanjana know there was an assertion error during scene checking/sampling: {str(message)}" + if "Invalid" in str(message): + accept_scene = False + feedback = { + "init_success": "no", + "goal_success": "no", + "init_feedback": f"We do not currently support {str(message).split(' ')[3]}. Please try a different object!", + "goal_feedback": "", + } + else: + accept_scene = False + feedback = { + "init_success": "no", + "goal_success": "no", + "init_feedback": f"Let Sanjana know there was an indeterminate assertion error during scene checking.", + "goal_feedback": "", + } + return accept_scene, feedback + + if not accept_scene: + self.restore_scene() + return accept_scene, feedback + + accept_scene, feedback = self.task.sample(kinematic_only=True) + if not accept_scene: self.restore_scene() return accept_scene, feedback self.restore_scene() + + # self.last_active_time = time.time() return accept_scene, feedback def close(self): - self.env.close() + self.task.simulator.disconnect() class iGFlask(Flask): @@ -406,7 +456,14 @@ def check_sampling(): success, feedback = app.envs[new_unique_id].sample(behavior_activity, bddl) if success: num_successful_scenes += 1 - feedback_instances.append(feedback) + """ + init_success, goal_success: one of the three values ['yes', 'no', 'untested'] + init_feedback, goal_feedback: feedback for the initial and goal conditions. They will be empty strings + if the conditions are not tested or the conditions are sampled successfully. 
+ """ + feedback_instances.append( + (feedback["init_success"], feedback["goal_success"], feedback["init_feedback"], feedback["goal_feedback"]) + ) success = num_successful_scenes >= min(NUM_REQUIRED_SUCCESSFUL_SCENES, len(ids)) full_feedback = feedback_instances diff --git a/igibson/examples/web_ui/static/Beechwood_0.gif b/igibson/examples/demo/web_ui/static/Beechwood_0.gif similarity index 100% rename from igibson/examples/web_ui/static/Beechwood_0.gif rename to igibson/examples/demo/web_ui/static/Beechwood_0.gif diff --git a/igibson/examples/web_ui/static/Merom_1.gif b/igibson/examples/demo/web_ui/static/Merom_1.gif similarity index 100% rename from igibson/examples/web_ui/static/Merom_1.gif rename to igibson/examples/demo/web_ui/static/Merom_1.gif diff --git a/igibson/examples/web_ui/static/Rs.gif b/igibson/examples/demo/web_ui/static/Rs.gif similarity index 100% rename from igibson/examples/web_ui/static/Rs.gif rename to igibson/examples/demo/web_ui/static/Rs.gif diff --git a/igibson/examples/web_ui/static/fetch.jpg b/igibson/examples/demo/web_ui/static/fetch.jpg similarity index 100% rename from igibson/examples/web_ui/static/fetch.jpg rename to igibson/examples/demo/web_ui/static/fetch.jpg diff --git a/igibson/examples/web_ui/static/igibson_logo.png b/igibson/examples/demo/web_ui/static/igibson_logo.png similarity index 100% rename from igibson/examples/web_ui/static/igibson_logo.png rename to igibson/examples/demo/web_ui/static/igibson_logo.png diff --git a/igibson/examples/web_ui/static/turtlebot.jpg b/igibson/examples/demo/web_ui/static/turtlebot.jpg similarity index 100% rename from igibson/examples/web_ui/static/turtlebot.jpg rename to igibson/examples/demo/web_ui/static/turtlebot.jpg diff --git a/igibson/examples/web_ui/templates/demo.html b/igibson/examples/demo/web_ui/templates/demo.html similarity index 100% rename from igibson/examples/web_ui/templates/demo.html rename to igibson/examples/demo/web_ui/templates/demo.html diff --git a/igibson/examples/web_ui/templates/finished.jpg b/igibson/examples/demo/web_ui/templates/finished.jpg similarity index 100% rename from igibson/examples/web_ui/templates/finished.jpg rename to igibson/examples/demo/web_ui/templates/finished.jpg diff --git a/igibson/examples/web_ui/templates/index.html b/igibson/examples/demo/web_ui/templates/index.html similarity index 100% rename from igibson/examples/web_ui/templates/index.html rename to igibson/examples/demo/web_ui/templates/index.html diff --git a/igibson/examples/web_ui/templates/loading.jpg b/igibson/examples/demo/web_ui/templates/loading.jpg similarity index 100% rename from igibson/examples/web_ui/templates/loading.jpg rename to igibson/examples/demo/web_ui/templates/loading.jpg diff --git a/igibson/examples/web_ui/templates/sampling_index.html b/igibson/examples/demo/web_ui/templates/sampling_index.html similarity index 100% rename from igibson/examples/web_ui/templates/sampling_index.html rename to igibson/examples/demo/web_ui/templates/sampling_index.html diff --git a/igibson/examples/web_ui/templates/waiting.jpg b/igibson/examples/demo/web_ui/templates/waiting.jpg similarity index 100% rename from igibson/examples/web_ui/templates/waiting.jpg rename to igibson/examples/demo/web_ui/templates/waiting.jpg diff --git a/igibson/examples/web_ui/web_ui.py b/igibson/examples/demo/web_ui/web_ui.py similarity index 98% rename from igibson/examples/web_ui/web_ui.py rename to igibson/examples/demo/web_ui/web_ui.py index 6d1d1fe5d..7e06416eb 100644 --- a/igibson/examples/web_ui/web_ui.py +++ 
b/igibson/examples/demo/web_ui/web_ui.py @@ -16,8 +16,8 @@ import igibson from igibson.objects.ycb_object import YCBObject from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.robots.fetch import Fetch -from igibson.robots.turtlebot import Turtlebot +from igibson.robots.fetch_robot import Fetch +from igibson.robots.turtlebot_robot import Turtlebot from igibson.scenes.gibson_indoor_scene import StaticIndoorScene from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene from igibson.simulator import Simulator @@ -203,6 +203,7 @@ def __init__(self): self.s = Simulator(mode="headless", image_width=400, image_height=400, rendering_settings=settings) scene = StaticIndoorScene("Rs") self.s.import_scene(scene) + # self.s.import_ig_scene(scene) self.robot = Turtlebot(config) self.s.import_robot(self.robot) @@ -254,7 +255,7 @@ def __init__(self, robot="turtlebot", scene="Rs_int"): ) self.s = Simulator(mode="headless", image_width=400, image_height=400, rendering_settings=settings) - self.s.import_scene(scene) + self.s.import_ig_scene(scene) if robot == "turtlebot": self.robot = Turtlebot(config) diff --git a/igibson/examples/environments/config_selector.py b/igibson/examples/environments/config_selector.py deleted file mode 100644 index ee1f5da78..000000000 --- a/igibson/examples/environments/config_selector.py +++ /dev/null @@ -1,52 +0,0 @@ -import logging -import os -from random import random -from sys import platform - -import yaml - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.render.profiler import Profiler -from igibson.utils.assets_utils import folder_is_hidden, get_available_ig_scenes -from igibson.utils.utils import let_user_pick - - -def main(random_selection=False): - """ - Prompts the user to select any available interactive scene and loads a turtlebot into it. - It steps the environment 100 times with random actions sampled from the action space, - using the Gym interface, resetting it 10 times. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - - ig_config_path = igibson.example_config_path - available_configs = sorted( - [f for f in os.listdir(ig_config_path) if (not folder_is_hidden(f) and f != "background")] - ) - config_id = available_configs[let_user_pick(available_configs, random_selection=random_selection) - 1] - config_filename = os.path.join(igibson.example_config_path, config_id) - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - # Reduce texture scale for Mac. 
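Editor's aside, illustrative only: the deleted config_selector.py above and the env_* example scripts that follow all reuse one pattern, namely load an example YAML config, override a few keys in memory, then pass the resulting dict (not the file path) to iGibsonEnv. A minimal headless sketch of that pattern, assuming turtlebot_nav.yaml is present under igibson.example_config_path:

import os

import yaml

import igibson
from igibson.envs.igibson_env import iGibsonEnv

config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml")
config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)

# Overrides are applied to the dict before the environment is constructed.
config_data["image_width"] = 256
config_data["image_height"] = 256
config_data["load_object_categories"] = []  # building only, as the commented-out lines in these scripts suggest

env = iGibsonEnv(config_file=config_data, mode="headless")
env.reset()
state, reward, done, info = env.step(env.action_space.sample())
env.close()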
- if platform == "darwin": - config_data["texture_scale"] = 0.5 - config_data["image_width"] = 512 - config_data["image_height"] = 512 - config_data["vertical_fov"] = 60 - # config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building - env = iGibsonEnv(config_file=config_data, mode="gui_interactive") - for j in range(10): - logging.info("Resetting environment") - env.reset() - for i in range(100): - with Profiler("Environment action step"): - action = env.action_space.sample() * 0.05 - state, reward, done, info = env.step(action) - if done: - logging.info("Episode finished after {} timesteps".format(i + 1)) - break - env.close() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/environments/env_int_example.py b/igibson/examples/environments/env_int_example.py deleted file mode 100644 index 164479964..000000000 --- a/igibson/examples/environments/env_int_example.py +++ /dev/null @@ -1,43 +0,0 @@ -import logging -import os -from sys import platform - -import yaml - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.render.profiler import Profiler -from igibson.utils.assets_utils import download_assets - - -def main(): - """ - Creates an iGibson environment from a config file with a turtlebot in Rs_int (interactive). - It steps the environment 100 times with random actions sampled from the action space, - using the Gym interface, resetting it 10 times. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - # If they have not been downloaded before, download assets - download_assets() - config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml") - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - # config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building - env = iGibsonEnv(config_file=config_data, mode="gui_interactive") - for j in range(10): - logging.info("Resetting environment") - env.reset() - for i in range(100): - with Profiler("Environment action step"): - action = env.action_space.sample() - state, reward, done, info = env.step(action) - if done: - logging.info("Episode finished after {} timesteps".format(i + 1)) - break - env.close() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/environments/env_int_selector.py b/igibson/examples/environments/env_int_selector.py deleted file mode 100644 index c407eaa7c..000000000 --- a/igibson/examples/environments/env_int_selector.py +++ /dev/null @@ -1,45 +0,0 @@ -import logging -import os -from random import random -from sys import platform - -import yaml - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.render.profiler import Profiler -from igibson.utils.assets_utils import get_available_ig_scenes -from igibson.utils.utils import let_user_pick - - -def main(random_selection=False): - """ - Prompts the user to select any available interactive scene and loads a turtlebot into it. - It steps the environment 100 times with random actions sampled from the action space, - using the Gym interface, resetting it 10 times. 
- """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml") - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - # config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building - available_ig_scenes = get_available_ig_scenes() - scene_id = available_ig_scenes[let_user_pick(available_ig_scenes, random_selection=random_selection) - 1] - env = iGibsonEnv(config_file=config_data, scene_id=scene_id, mode="gui_interactive") - for j in range(10): - logging.info("Resetting environment") - env.reset() - for i in range(100): - with Profiler("Environment action step"): - action = env.action_space.sample() - state, reward, done, info = env.step(action) - if done: - logging.info("Episode finished after {} timesteps".format(i + 1)) - break - env.close() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/environments/env_nonint_example.py b/igibson/examples/environments/env_nonint_example.py deleted file mode 100644 index de27dd96c..000000000 --- a/igibson/examples/environments/env_nonint_example.py +++ /dev/null @@ -1,43 +0,0 @@ -import logging -import os -from sys import platform - -import yaml - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.render.profiler import Profiler -from igibson.utils.assets_utils import download_assets, download_demo_data - - -def main(): - """ - Creates an iGibson environment from a config file with a turtlebot in Rs (not interactive). - It steps the environment 100 times with random actions sampled from the action space, - using the Gym interface, resetting it 10 times. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - # If they have not been downloaded before, download assets and Rs Gibson (non-interactive) models - download_assets() - download_demo_data() - config_filename = os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml") - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - env = iGibsonEnv(config_file=config_data, mode="gui_interactive") - for j in range(10): - logging.info("Resetting environment") - env.reset() - for i in range(100): - with Profiler("Environment action step"): - action = env.action_space.sample() - state, reward, done, info = env.step(action) - if done: - logging.info("Episode finished after {} timesteps".format(i + 1)) - break - env.close() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/environments/env_nonint_selector.py b/igibson/examples/environments/env_nonint_selector.py deleted file mode 100644 index 99fdabf22..000000000 --- a/igibson/examples/environments/env_nonint_selector.py +++ /dev/null @@ -1,44 +0,0 @@ -import logging -import os -import random -from sys import platform - -import yaml - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.render.profiler import Profiler -from igibson.utils.assets_utils import get_available_g_scenes -from igibson.utils.utils import let_user_pick - - -def main(random_selection=False): - """ - Prompts the user to select any available non-interactive scene and loads a turtlebot into it. 
- It steps the environment 100 times with random actions sampled from the action space, - using the Gym interface, resetting it 10 times. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - config_filename = os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml") - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - available_g_scenes = get_available_g_scenes() - scene_id = available_g_scenes[let_user_pick(available_g_scenes, random_selection=random_selection) - 1] - env = iGibsonEnv(config_file=config_filename, scene_id=scene_id, mode="gui_interactive") - for j in range(10): - logging.info("Resetting environment") - env.reset() - for i in range(100): - with Profiler("Environment action step"): - action = env.action_space.sample() - state, reward, done, info = env.step(action) - if done: - logging.info("Episode finished after {} timesteps".format(i + 1)) - break - env.close() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/example_selector.py b/igibson/examples/example_selector.py deleted file mode 100644 index a832c9bcb..000000000 --- a/igibson/examples/example_selector.py +++ /dev/null @@ -1,94 +0,0 @@ -import importlib -import os -import pkgutil -import signal -import string -from multiprocessing import Process - -import igibson.examples as examples -from igibson.utils.utils import let_user_pick - -TIMEOUT = 3 - - -def interrupted(signum, frame): - raise ValueError("Time-up for keyboard input") - - -signal.signal(signal.SIGALRM, interrupted) - - -def timed_input(): - try: - foo = input("Press ENTER to execute or 's'+ENTER to skip (or wait 3 secs to execute)\n") - return foo == "s" - except ValueError: - # timeout - return - - -def main(): - """ - Creates an iGibson environment from a config file with a turtlebot in Rs_int (interactive). - It steps the environment 100 times with random actions sampled from the action space, - using the Gym interface, resetting it 10 times. - """ - examples_list = ["help", "all"] - for kk in pkgutil.walk_packages(examples.__path__, examples.__name__ + "."): - if not kk.ispkg and kk.name[17:] != "example_selector": - examples_list += [kk.name[17:]] - - selected_demo = 0 - logo = ( - " _ _____ _ _" + "\n" + "(_) / ____|(_)| |" + "\n" - " _ | | __ _ | |__ ___ ___ _ __" + "\n" - "| || | |_ || || '_ \ / __| / _ \ | '_ \\" + "\n" - "| || |__| || || |_) |\__ \| (_) || | | |" + "\n" - "|_| \_____||_||_.__/ |___/ \___/ |_| |_|" + "\n" - ) - while selected_demo == 0: - print(logo) - print("Select a demo/example, 'help' for information about a specific demo, or 'all' to run all demos:") - selected_demo = let_user_pick(examples_list, print_intro=False) - 1 - if selected_demo == 0: - help_demo = int(input("\nProvide the number of the example you need information for: ")) - 1 - if help_demo == 0: - print("Print the description of a demo/example") - elif help_demo == 1: - print("Execute all demos/examples in order") - else: - module_help = importlib.import_module("igibson.examples." 
+ examples_list[help_demo]) - print(module_help.main.__doc__) - input("Press enter") - elif selected_demo == 1: - print("Executing all demos") - for idx in range(2, len(examples_list)): - print("*" * 80) - print("*" * 80) - print(logo) - print("*" * 80) - print("*" * 80) - signal.alarm(TIMEOUT) - s = timed_input() - # disable the alarm after success - signal.alarm(0) - if s: - continue - print("Executing " + examples_list[idx]) - - i = importlib.import_module("igibson.examples." + examples_list[idx]) - if "selector" in examples_list[idx]: - p = Process(target=i.main, args=("random=True",)) - else: - p = Process(target=i.main) - p.start() - p.join() - print("Ended " + examples_list[idx]) - else: - print("Executing " + examples_list[selected_demo]) - i = importlib.import_module("igibson.examples." + examples_list[selected_demo]) - i.main() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/scenes/scenescan2igibson/README.md b/igibson/examples/gen_data/README.md similarity index 100% rename from igibson/examples/scenes/scenescan2igibson/README.md rename to igibson/examples/gen_data/README.md diff --git a/igibson/examples/scenes/scenescan2igibson/combine_texture.ipynb b/igibson/examples/gen_data/combine_texture.ipynb similarity index 100% rename from igibson/examples/scenes/scenescan2igibson/combine_texture.ipynb rename to igibson/examples/gen_data/combine_texture.ipynb diff --git a/igibson/examples/scenes/scenescan2igibson/combine_texture.py b/igibson/examples/gen_data/combine_texture.py similarity index 100% rename from igibson/examples/scenes/scenescan2igibson/combine_texture.py rename to igibson/examples/gen_data/combine_texture.py diff --git a/igibson/examples/scenes/scenescan2igibson/generate_floor_map.py b/igibson/examples/gen_data/generate_floor_map.py similarity index 100% rename from igibson/examples/scenes/scenescan2igibson/generate_floor_map.py rename to igibson/examples/gen_data/generate_floor_map.py diff --git a/igibson/examples/scenes/scenescan2igibson/generate_traversable_map.py b/igibson/examples/gen_data/generate_traversable_map.py similarity index 100% rename from igibson/examples/scenes/scenescan2igibson/generate_traversable_map.py rename to igibson/examples/gen_data/generate_traversable_map.py diff --git a/igibson/examples/learning/demo_collection_example.py b/igibson/examples/learning/demo_collection_example.py deleted file mode 100644 index df97e5b06..000000000 --- a/igibson/examples/learning/demo_collection_example.py +++ /dev/null @@ -1,204 +0,0 @@ -import argparse -import datetime -import logging -import os -import sys -import tempfile - -import numpy as np - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings -from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher -from igibson.utils.ig_logging import IGLogWriter -from igibson.utils.utils import parse_config - - -def main(): - """ - Example of how to save a demo of a task - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - args = parse_args() - collect_demo( - args.scene, - args.task, - args.task_id, - args.instance_id, - args.log_path, - args.disable_save, - args.disable_scene_cache, - args.profile, - args.config, - ) - - -def parse_args(): - scene_choices = [ - "Beechwood_0_int", - "Beechwood_1_int", - "Benevolence_0_int", - "Benevolence_1_int", - "Benevolence_2_int", - "Ihlen_0_int", - "Ihlen_1_int", - "Merom_0_int", - "Merom_1_int", - "Pomaria_0_int", - 
"Pomaria_1_int", - "Pomaria_2_int", - "Rs_int", - "Wainscott_0_int", - "Wainscott_1_int", - ] - - task_id_choices = [0, 1] - parser = argparse.ArgumentParser(description="Run and collect a demo of a task") - parser.add_argument( - "--scene", - type=str, - choices=scene_choices, - default="Rs_int", - nargs="?", - help="Scene name/ID matching iGibson interactive scenes.", - ) - parser.add_argument( - "--task", - type=str, - required=False, - default="cleaning_out_drawers", - nargs="?", - help="Name of task to collect a demo of. If it a BEHAVIOR activity, the name should be one of the official" - " activity labels, matching one of the folders in the BDDL repository.", - ) - parser.add_argument( - "--task_id", - type=int, - required=False, - default=0, - choices=task_id_choices, - nargs="?", - help="[Only for BEHAVIOR activities] Integer ID identifying the BDDL definition of the BEHAVIOR activity. " - "Since we only provide two definitions per activity, the ID should be 0 or 1.", - ) - parser.add_argument( - "--instance_id", - type=int, - required=False, - default=0, - help="[Only for BEHAVIOR activities] Instance of BEHAVIOR activity (particular URDF corresponding to " - "an instantiation of a BDDL activity definition)", - ) - demo_file = os.path.join(tempfile.gettempdir(), "demo.hdf5") - parser.add_argument("--log_path", type=str, default=demo_file, help="Path (and filename) of log file") - parser.add_argument("--disable_save", action="store_true", help="Whether to disable saving logfiles.") - parser.add_argument( - "--disable_scene_cache", action="store_true", help="Whether to disable using pre-initialized scene caches." - ) - parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.") - parser.add_argument( - "--config", - help="which config file to use [default: use yaml files in examples/configs]", - default=os.path.join(igibson.example_config_path, "behavior_vr.yaml"), - ) - return parser.parse_args() - - -def collect_demo( - scene, - task, - task_id=0, - instance_id=0, - log_path=None, - disable_save=False, - disable_scene_cache=False, - profile=False, - config_file=os.path.join(igibson.example_config_path, "behavior_vr.yaml"), -): - """ """ - # HDR files for PBR rendering - hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr") - hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr") - light_modulation_map_filename = os.path.join( - igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png" - ) - background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg") - - # Rendering settings - rendering_settings = MeshRendererSettings( - optimized=True, - fullscreen=False, - env_texture_filename=hdr_texture, - env_texture_filename2=hdr_texture2, - env_texture_filename3=background_texture, - light_modulation_map_filename=light_modulation_map_filename, - enable_shadow=True, - enable_pbr=True, - msaa=False, - light_dimming_factor=1.0, - ) - - config = parse_config(config_file) - config["task"] = task - config["task_id"] = task_id - config["scene_id"] = scene - config["instance_id"] = instance_id - config["online_sampling"] = disable_scene_cache - config["load_clutter"] = True - env = iGibsonEnv( - config_file=config, - mode="headless", - action_timestep=1 / 30.0, - physics_timestep=1 / 300.0, - rendering_settings=rendering_settings, - ) - env.reset() - robot = env.robots[0] - - log_writer = None - if not disable_save: - 
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - if log_path is None: - log_path = "{}_{}_{}_{}_{}.hdf5".format(task, task_id, scene, instance_id, timestamp) - log_writer = IGLogWriter( - env.simulator, - log_filepath=log_path, - task=env.task, - store_vr=False, - vr_robot=robot, - profiling_mode=profile, - filter_objects=True, - ) - log_writer.set_up_data_storage() - log_writer.hf.attrs["/metadata/instance_id"] = instance_id - - steps = 0 - - # Main recording loop - while True: - if robot.__class__.__name__ == "BehaviorRobot" and steps < 2: - # Use the first 2 steps to activate BehaviorRobot - action = np.zeros((28,)) - action[19] = 1 - action[27] = 1 - else: - action = np.random.uniform(-0.01, 0.01, size=(robot.action_dim,)) - - # Execute the action - state, reward, done, info = env.step(action) - - if log_writer and not disable_save: - log_writer.process_frame() - - if done: - break - - if log_writer and not disable_save: - log_writer.end_log_session() - - env.close() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/object_states/cleaning_demo.py b/igibson/examples/object_states/cleaning_demo.py deleted file mode 100644 index e0e882188..000000000 --- a/igibson/examples/object_states/cleaning_demo.py +++ /dev/null @@ -1,60 +0,0 @@ -import logging - -from igibson import object_states -from igibson.objects.ycb_object import YCBObject -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene -from igibson.simulator import Simulator - - -def main(): - """ - Demo of a cleaning task - Loads an interactive scene and sets all object surface to be dirty - Loads also a cleaning tool that can be soaked in water and used to clean objects if moved manually - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - s = Simulator(mode="gui_interactive", image_width=1280, image_height=720, device_idx=0) - # Set a better viewing direction - s.viewer.initial_pos = [-0.7, 2.4, 1.1] - s.viewer.initial_view_direction = [0, 0.9, -0.5] - s.viewer.reset_viewer() - # Only load a few objects - load_object_categories = ["breakfast_table", "bottom_cabinet", "sink", "stove", "fridge", "window"] - scene = InteractiveIndoorScene( - "Rs_int", texture_randomization=False, object_randomization=False, load_object_categories=load_object_categories - ) - s.import_scene(scene) - - # Load a cleaning tool - block = YCBObject(name="036_wood_block", abilities={"soakable": {}, "cleaningTool": {}}) - s.import_object(block) - block.set_position([-1.4, 3.0, 1.5]) - - # Set everything that can go dirty and activate the water sources - stateful_objects = set( - scene.get_objects_with_state(object_states.Dusty) - + scene.get_objects_with_state(object_states.Stained) - + scene.get_objects_with_state(object_states.WaterSource) - ) - for obj in stateful_objects: - if object_states.Dusty in obj.states: - logging.info("Setting object to be Dusty") - obj.states[object_states.Dusty].set_value(True) - - if object_states.Stained in obj.states: - logging.info("Setting object to be Stained") - obj.states[object_states.Stained].set_value(True) - - if object_states.WaterSource in obj.states and object_states.ToggledOn in obj.states: - logging.info("Setting water source object to be ToggledOn") - obj.states[object_states.ToggledOn].set_value(True) - - try: - while True: - s.step() - finally: - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/object_states/cleaning_demo_with_reset.py 
b/igibson/examples/object_states/cleaning_demo_with_reset.py deleted file mode 100644 index 01e684096..000000000 --- a/igibson/examples/object_states/cleaning_demo_with_reset.py +++ /dev/null @@ -1,125 +0,0 @@ -import logging -import os - -import numpy as np -import pybullet as p - -from igibson import object_states -from igibson.objects.articulated_object import URDFObject -from igibson.scenes.empty_scene import EmptyScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_ig_model_path -from igibson.utils.utils import restoreState - - -def main(): - """ - Demo of a cleaning task that resets after everything has been cleaned - To save/load state it combines pybullet save/load functionality and additional iG functions for the extended states - Loads an empty scene with a sink, a dusty table and a dirty and stained bowl, and a cleaning tool - If everything is cleaned, or after N steps, the scene resets to the initial state - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - s = Simulator(mode="gui_interactive", image_width=1280, image_height=720) - # Set a better viewing direction - s.viewer.initial_pos = [-0.5, -0.4, 1.5] - s.viewer.initial_view_direction = [0.7, 0.1, -0.7] - s.viewer.reset_viewer() - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - s.import_scene(scene) - - # Load sink ON - model_path = os.path.join(get_ig_model_path("sink", "sink_1"), "sink_1.urdf") - sink = URDFObject( - filename=model_path, - category="sink", - name="sink_1", - scale=np.array([0.8, 0.8, 0.8]), - abilities={"toggleable": {}, "waterSource": {}}, - ) - s.import_object(sink) - sink.set_position([1, 1, 0.8]) - assert sink.states[object_states.ToggledOn].set_value(True) - - # Load cleaning tool - model_path = get_ig_model_path("scrub_brush", "scrub_brush_000") - model_filename = os.path.join(model_path, "scrub_brush_000.urdf") - max_bbox = [0.1, 0.1, 0.1] - avg = {"size": max_bbox, "density": 67.0} - brush = URDFObject( - filename=model_filename, - category="scrub_brush", - name="scrub_brush", - avg_obj_dims=avg, - fit_avg_dim_volume=True, - model_path=model_path, - ) - s.import_object(brush) - brush.set_position([0, -2, 0.4]) - - # Load table with dust - model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf") - desk = URDFObject( - filename=model_path, - category="breakfast_table", - name="19898", - scale=np.array([0.8, 0.8, 0.8]), - abilities={"dustyable": {}}, - ) - s.import_object(desk) - desk.set_position([1, -2, 0.4]) - assert desk.states[object_states.Dusty].set_value(True) - - # Load a bowl with stains - model_path = os.path.join(get_ig_model_path("bowl", "68_0"), "68_0.urdf") - bowl = URDFObject(filename=model_path, category="bowl", name="bowl", abilities={"dustyable": {}, "stainable": {}}) - s.import_object(bowl) - assert bowl.states[object_states.OnTop].set_value(desk, True, use_ray_casting_method=True) - assert bowl.states[object_states.Stained].set_value(True) - - # Save the initial state. - pb_initial_state = p.saveState() # Save pybullet state (kinematics) - brush_initial_extended_state = brush.dump_state() # Save brush extended state - logging.info(brush_initial_extended_state) - desk_initial_extended_state = desk.dump_state() # Save desk extended state - logging.info(desk_initial_extended_state) - bowl_initial_extended_state = bowl.dump_state() # Save bowl extended state - logging.info(bowl_initial_extended_state) - - # Main simulation loop. 
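# A minimal sketch of the save/restore pattern used in this demo: p.saveState() only
# captures kinematics, so the extended iGibson states (dusty, stained, toggled on, ...)
# are snapshotted separately with dump_state() and put back with load_state().
# `objects` is assumed to be a list of already-imported iGibson objects such as the
# brush, desk and bowl above.
import pybullet as p

from igibson.utils.utils import restoreState


def snapshot(objects):
    return p.saveState(), [obj.dump_state() for obj in objects]


def restore(saved, objects):
    pb_state, extended_states = saved
    restoreState(pb_state)  # restore the kinematic state saved by pybullet
    for obj, extended in zip(objects, extended_states):
        obj.load_state(extended)  # restore the extended (non-kinematic) state
        obj.force_wakeup()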
- max_steps = 1000 - try: - while True: - # Keep stepping until table or bowl are clean, or we reach 1000 steps - steps = 0 - while ( - desk.states[object_states.Dusty].get_value() - and bowl.states[object_states.Stained].get_value() - and steps != max_steps - ): - steps += 1 - s.step() - logging.info("Step {}".format(steps)) - - if not desk.states[object_states.Dusty].get_value(): - logging.info("Reset because Table cleaned") - elif not bowl.states[object_states.Stained].get_value(): - logging.info("Reset because Bowl cleaned") - else: - logging.info("Reset because max steps") - - # Reset to the initial state - restoreState(pb_initial_state) - brush.load_state(brush_initial_extended_state) - brush.force_wakeup() - desk.load_state(desk_initial_extended_state) - desk.force_wakeup() - bowl.load_state(bowl_initial_extended_state) - bowl.force_wakeup() - - finally: - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/object_states/sliceable_demo_with_reset.py b/igibson/examples/object_states/sliceable_demo_with_reset.py deleted file mode 100644 index fecfc32d4..000000000 --- a/igibson/examples/object_states/sliceable_demo_with_reset.py +++ /dev/null @@ -1,122 +0,0 @@ -import logging -import os - -import numpy as np -import pybullet as p - -from igibson import object_states -from igibson.objects.articulated_object import URDFObject -from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer -from igibson.scenes.empty_scene import EmptyScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_ig_model_path -from igibson.utils.utils import restoreState - - -def main(): - """ - Demo of a slicing task that resets after everything - To save/load state it combines pybullet save/load functionality and additional iG functions for the extended states - Loads an empty scene with a desk and an apple. After a while, the apple gets sliced. 
Then, the scene resets - This demo also demonstrates how to create manually a grouped object with multiple URDF models, and a - multiplexed object (two URDF models for the same object) with a model for the full object and a model - of the grouped slices - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - s = Simulator(mode="gui_interactive", image_width=1280, image_height=720) - # Set a better viewing direction - s.viewer.initial_pos = [-0.3, -0.3, 1.1] - s.viewer.initial_view_direction = [0.7, 0.6, -0.4] - s.viewer.reset_viewer() - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - s.import_scene(scene) - - # Load a desk - model_path = os.path.join(get_ig_model_path("breakfast_table", "19203"), "19203.urdf") - desk = URDFObject( - filename=model_path, category="breakfast_table", name="19898", scale=np.array([0.8, 0.8, 0.8]), abilities={} - ) - s.import_object(desk) - desk.set_position([0, 0, 0.4]) - - # Create an URDF object of an apple, but doesn't load it in the simulator - model_path = os.path.join(get_ig_model_path("apple", "00_0"), "00_0.urdf") - whole_obj = URDFObject( - model_path, name="00_0", category="apple", scale=np.array([1.0, 1.0, 1.0]), initial_pos=[100, 100, -100] - ) - - object_parts = [] - # Check the parts that compose the apple and create URDF objects of them - for i, part in enumerate(whole_obj.metadata["object_parts"]): - part_category = part["category"] - part_model = part["model"] - # Scale the offset accordingly - part_pos = part["pos"] * whole_obj.scale - part_orn = part["orn"] - part_model_path = get_ig_model_path(part_category, part_model) - part_filename = os.path.join(part_model_path, part_model + ".urdf") - part_obj_name = whole_obj.name + "_part_{}".format(i) - part_obj = URDFObject( - part_filename, - name=part_obj_name, - category=part_category, - model_path=part_model_path, - scale=whole_obj.scale, - initial_pos=[100 + i, 100, -100], - ) - object_parts.append((part_obj, (part_pos, part_orn))) - - # Group the apple parts into a single grouped object - grouped_parts_obj = ObjectGrouper(object_parts) - - # Create a multiplexed object: either the full apple, or the parts - multiplexed_obj = ObjectMultiplexer(whole_obj.name + "_multiplexer", [whole_obj, grouped_parts_obj], 0) - - # Finally, load the multiplexed object - s.import_object(multiplexed_obj) - multiplexed_obj.set_position([0, 0, 0.72]) - - # Let the apple get stable - for _ in range(100): - s.step() - - # Save the initial state. 
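# A condensed sketch of the whole-object / sliced-parts setup above: build a URDFObject
# for each entry in the whole object's "object_parts" metadata, group them with
# ObjectGrouper, and wrap whole + parts in an ObjectMultiplexer that starts out showing
# the whole object (index 0). Assumes `whole_obj` was created as above and has not been
# imported into the simulator yet.
import os

from igibson.objects.articulated_object import URDFObject
from igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer
from igibson.utils.assets_utils import get_ig_model_path


def build_sliceable_multiplexer(whole_obj):
    object_parts = []
    for i, part in enumerate(whole_obj.metadata["object_parts"]):
        part_model_path = get_ig_model_path(part["category"], part["model"])
        part_obj = URDFObject(
            os.path.join(part_model_path, part["model"] + ".urdf"),
            name="{}_part_{}".format(whole_obj.name, i),
            category=part["category"],
            model_path=part_model_path,
            scale=whole_obj.scale,
            initial_pos=[100 + i, 100, -100],  # park the part far away until it is needed
        )
        object_parts.append((part_obj, (part["pos"] * whole_obj.scale, part["orn"])))
    grouped_parts = ObjectGrouper(object_parts)
    return ObjectMultiplexer(whole_obj.name + "_multiplexer", [whole_obj, grouped_parts], 0)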
- initial_state_pb = p.saveState() - initial_state_multiplexed_obj = multiplexed_obj.dump_state() - logging.info(multiplexed_obj) - - try: - while True: - logging.info("Stepping the simulator") - for _ in range(100): - s.step() - - logging.info("Slicing the apple and moving the parts") - # Slice the apple and set the object parts away - multiplexed_obj.states[object_states.Sliced].set_value(True) - - # Check that the multiplexed changed to the group of parts - assert isinstance(multiplexed_obj.current_selection(), ObjectGrouper) - - # Move the parts - for part_obj in multiplexed_obj.objects: - part_obj.set_position([0, 0, 1]) - - logging.info("Stepping the simulator") - for _ in range(100): - s.step() - - logging.info("Restoring the state") - # Restore the state - restoreState(initial_state_pb) - - # The apple should become whole again - multiplexed_obj.load_state(initial_state_multiplexed_obj) - - finally: - s.disconnect() - p.removeState(initial_state_pb) - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/objects/__init__.py b/igibson/examples/objects/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/igibson/examples/objects/draw_bounding_box.py b/igibson/examples/objects/draw_bounding_box.py deleted file mode 100644 index 50f85f1a3..000000000 --- a/igibson/examples/objects/draw_bounding_box.py +++ /dev/null @@ -1,82 +0,0 @@ -import itertools -import logging -import os - -import numpy as np -import pybullet as p -import trimesh -from scipy.spatial.transform import Rotation - -import igibson -from igibson.objects.articulated_object import URDFObject -from igibson.scenes.empty_scene import EmptyScene -from igibson.simulator import Simulator -from igibson.utils import utils - - -def main(): - """ - Shows how to obtain the bounding box of an articulated object. - Draws the bounding box around the loaded object, a cabinet, while it moves. - Visible only in the pybullet GUI. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - s = Simulator(mode="headless", use_pb_gui=True) - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - s.import_scene(scene) - - # Banana is a single-link object and Door is a multi-link object. - banana_dir = os.path.join(igibson.ig_dataset_path, "objects/banana/09_0") - banana_filename = os.path.join(banana_dir, "09_0.urdf") - door_dir = os.path.join(igibson.ig_dataset_path, "objects/door/8930") - door_filename = os.path.join(door_dir, "8930.urdf") - - banana = URDFObject( - filename=banana_filename, category="banana", scale=np.array([3.0, 5.0, 2.0]), merge_fixed_links=False - ) - door = URDFObject(filename=door_filename, category="door", scale=np.array([1.0, 2.0, 3.0]), merge_fixed_links=False) - s.import_object(banana) - s.import_object(door) - banana.set_position_orientation([2, 0, 0.75], [0, 0, 0, 1]) - door.set_position_orientation([-2, 0, 2], Rotation.from_euler("XYZ", [0, 0, -np.pi / 4]).as_quat()) - - # Main simulation loop - try: - first_step = True - while True: - # Step simulation. - s.step() - - line_idx = 0 - for obj in [banana, door]: - # Draw new debug lines for the bounding boxes. 
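# A variant sketch of the drawing step below that connects only the 12 box edges instead
# of every vertex pair: two corners of a box share an edge exactly when their sign tuples
# differ in a single axis. It reuses the same bounding-box and transform calls as the
# example; `obj` is any imported iGibson object.
import itertools

import numpy as np
import pybullet as p
import trimesh

from igibson.utils import utils


def draw_bbox_edges(obj, first_step, start_line_idx=0):
    bbox_center, bbox_orn, bbox_bf_extent, _ = obj.get_base_aligned_bounding_box(visual=True)
    signs = np.array(list(itertools.product((1, -1), repeat=3)))
    corners_bf = signs * (bbox_bf_extent / 2)
    corners_wf = trimesh.transformations.transform_points(
        corners_bf, utils.quat_pos_to_mat(bbox_center, bbox_orn)
    )
    line_idx = start_line_idx
    for i in range(len(signs)):
        for j in range(i + 1, len(signs)):
            if np.sum(signs[i] != signs[j]) == 1:  # corners differing in one axis share an edge
                p.addUserDebugLine(
                    corners_wf[i],
                    corners_wf[j],
                    lineColorRGB=[1.0, 0.0, 0.0],
                    lineWidth=1,
                    lifeTime=0,
                    replaceItemUniqueId=-1 if first_step else line_idx,
                )
                line_idx += 1
    return line_idx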
- bbox_center, bbox_orn, bbox_bf_extent, bbox_wf_extent = obj.get_base_aligned_bounding_box(visual=True) - bbox_frame_vertex_positions = np.array(list(itertools.product((1, -1), repeat=3))) * ( - bbox_bf_extent / 2 - ) - bbox_transform = utils.quat_pos_to_mat(bbox_center, bbox_orn) - world_frame_vertex_positions = trimesh.transformations.transform_points( - bbox_frame_vertex_positions, bbox_transform - ) - for i, from_vertex in enumerate(world_frame_vertex_positions): - for j, to_vertex in enumerate(world_frame_vertex_positions): - if j <= i: - assert ( - p.addUserDebugLine( - from_vertex, - to_vertex, - lineColorRGB=[1.0, 0.0, 0.0], - lineWidth=1, - lifeTime=0, - replaceItemUniqueId=-1 if first_step else line_idx, - ) - == line_idx - ) - line_idx += 1 - first_step = False - finally: - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/objects/highlight_objects.py b/igibson/examples/objects/highlight_objects.py deleted file mode 100644 index d792274dc..000000000 --- a/igibson/examples/objects/highlight_objects.py +++ /dev/null @@ -1,49 +0,0 @@ -""" - Generate example top-down segmentation map via renderer -""" -import logging - -import numpy as np - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene -from igibson.simulator import Simulator - - -def main(): - """ - Highlights visually all object instances of some given category and then removes the highlighting - It also demonstrates how to apply an action on all instances of objects of a given category - ONLY WORKS WITH OPTIMIZED RENDERING (not on Mac) - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - settings = MeshRendererSettings(optimized=True, enable_shadow=True, blend_highlight=True) - s = Simulator( - mode="gui_interactive", - image_width=512, - image_height=512, - rendering_settings=settings, - ) - scene = InteractiveIndoorScene( - "Rs_int", texture_randomization=False, load_object_categories=["window"], object_randomization=False - ) - s.import_scene(scene) - - i = 0 - while True: - s.step() - if i % 100 == 0: - logging.info("Highlighting windows") - for obj in scene.objects_by_category["window"]: - obj.highlight() - - if i % 100 == 50: - logging.info("Deactivating the highlight on windows") - for obj in scene.objects_by_category["window"]: - obj.unhighlight() - - i += 1 - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/objects/load_object_selector.py b/igibson/examples/objects/load_object_selector.py deleted file mode 100644 index f3ed65eb6..000000000 --- a/igibson/examples/objects/load_object_selector.py +++ /dev/null @@ -1,144 +0,0 @@ -import logging -import os -from sys import platform - -import yaml - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.objects.articulated_object import URDFObject -from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings -from igibson.render.profiler import Profiler -from igibson.robots.turtlebot import Turtlebot -from igibson.scenes.empty_scene import EmptyScene -from igibson.scenes.gibson_indoor_scene import StaticIndoorScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import ( - get_all_object_categories, - get_ig_avg_category_specs, - get_ig_category_path, - get_ig_model_path, - get_object_models_of_category, -) -from igibson.utils.utils import let_user_pick, parse_config - - -def main(random_selection=False): - """ - This 
demo shows how to load any scaled objects from the iG object model dataset and - additional objects from the YCB dataset in predefined locations - The user selects an object model to load - The objects can be loaded into an empty scene, an interactive scene (iG) or a static scene (Gibson) - The example also shows how to use the Environment API or directly the Simulator API, loading objects and robots - and executing actions - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - scene_options = ["Empty scene", "Interactive scene (iG)", "Static scene (Gibson)"] - type_of_scene = let_user_pick(scene_options, random_selection=random_selection) - 1 - - if type_of_scene == 0: # Empty - config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")) - settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - # scene.load_object_categories(benchmark_names) - s.import_scene(scene) - robot_config = config["robot"] - robot_config.pop("name") - turtlebot = Turtlebot(**robot_config) - s.import_robot(turtlebot) - - elif type_of_scene == 1: # iG - config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml") - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building - config_data["visible_target"] = False - config_data["visible_path"] = False - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - env = iGibsonEnv(config_file=config_data, mode="gui_interactive") - s = env.simulator - - elif type_of_scene == 2: # Gibson - config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")) - settings = MeshRendererSettings(enable_shadow=False, msaa=False) - # Reduce texture scale for Mac. 
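# A condensed sketch of the object-import pattern used further down in this example (and
# in load_objects.py): pick a model of a category (randomly if none is given), build a
# URDFObject scaled to the average category dimensions, and import it into simulator `s`.
import os

import numpy as np

from igibson.objects.articulated_object import URDFObject
from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path


def import_category_model(s, category, model=None, name=None, initial_pos=(0.5, -0.5, 1.01)):
    if model is None:
        model = np.random.choice(os.listdir(get_ig_category_path(category)))
    model_path = get_ig_model_path(category, model)
    obj = URDFObject(
        os.path.join(model_path, model + ".urdf"),
        name=name or "{}_0".format(category),
        category=category,
        model_path=model_path,
        avg_obj_dims=get_ig_avg_category_specs().get(category),
        fit_avg_dim_volume=True,
        texture_randomization=False,
        overwrite_inertial=True,
        initial_pos=list(initial_pos),
    )
    s.import_object(obj)
    return obj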
- if platform == "darwin": - settings.texture_scale = 0.5 - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - - scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=False) - s.import_scene(scene) - robot_config = config["robot"] - robot_config.pop("name") - turtlebot = Turtlebot(**robot_config) - s.import_robot(turtlebot) - - # Set a better viewing direction - s.viewer.initial_pos = [-1.7, -0.9, 0.9] - s.viewer.initial_view_direction = [0.9, 0, -0.3] - s.viewer.reset_viewer() - - # Select a category to load - available_obj_categories = get_all_object_categories() - obj_category = available_obj_categories[ - let_user_pick(available_obj_categories, random_selection=random_selection) - 1 - ] - - # Select a model to load - available_obj_models = get_object_models_of_category(obj_category) - obj_model = available_obj_models[let_user_pick(available_obj_models, random_selection=random_selection) - 1] - - # Load the specs of the object categories, e.g., common scaling factor - avg_category_spec = get_ig_avg_category_specs() - - try: - # Create the full path combining the path for all models and the name of the model - model_path = get_ig_model_path(obj_category, obj_model) - filename = os.path.join(model_path, obj_model + ".urdf") - - # Create a unique name for the object instance - obj_name = "{}_{}".format(obj_category, 0) - - # Create and import the object - simulator_obj = URDFObject( - filename, - name=obj_name, - category=obj_category, - model_path=model_path, - avg_obj_dims=avg_category_spec.get(obj_category), - fit_avg_dim_volume=True, - texture_randomization=False, - overwrite_inertial=True, - initial_pos=[0.5, -0.5, 1.01], - ) - s.import_object(simulator_obj) - - if type_of_scene == 1: - for j in range(10): - logging.info("Resetting environment") - env.reset() - for i in range(100): - with Profiler("Environment action step"): - # action = env.action_space.sample() - state, reward, done, info = env.step([0.1, 0.1]) - if done: - logging.info("Episode finished after {} timesteps".format(i + 1)) - break - else: - for i in range(10000): - with Profiler("Simulator step"): - turtlebot.apply_action([0.1, 0.1]) - s.step() - rgb = s.renderer.render_robot_cameras(modes=("rgb")) - - finally: - if type_of_scene == 1: - env.close() - else: - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/objects/load_objects.py b/igibson/examples/objects/load_objects.py deleted file mode 100644 index 114df22d0..000000000 --- a/igibson/examples/objects/load_objects.py +++ /dev/null @@ -1,168 +0,0 @@ -import logging -import os -from sys import platform - -import numpy as np -import yaml - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.objects.articulated_object import URDFObject -from igibson.objects.ycb_object import YCBObject -from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings -from igibson.render.profiler import Profiler -from igibson.robots.turtlebot import Turtlebot -from igibson.scenes.empty_scene import EmptyScene -from igibson.scenes.gibson_indoor_scene import StaticIndoorScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path -from igibson.utils.utils import let_user_pick, parse_config - - -def main(): - """ - This demo shows how to load scaled objects from the iG object model dataset and - additional objects from the YCB dataset in predefined 
locations - Loads a concrete object model of a table, and a random one of the same category, and 10 cracker boxes - The objects can be loaded into an empty scene, an interactive scene (iG) or a static scene (Gibson) - The example also shows how to use the Environment API or directly the Simulator API, loading objects and robots - and executing actions - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - scene_options = ["Empty scene", "Interactive scene (iG)", "Static scene (Gibson)"] - type_of_scene = let_user_pick(scene_options) - 1 - - if type_of_scene == 0: # Empty - config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")) - settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - # scene.load_object_categories(benchmark_names) - s.import_scene(scene) - robot_config = config["robot"] - robot_config.pop("name") - turtlebot = Turtlebot(**robot_config) - s.import_robot(turtlebot) - - elif type_of_scene == 1: # iG - config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml") - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - config_data["load_object_categories"] = [] # Uncomment this line to accelerate loading with only the building - config_data["visible_target"] = False - config_data["visible_path"] = False - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - env = iGibsonEnv(config_file=config_data, mode="gui_interactive") - s = env.simulator - - elif type_of_scene == 2: # Gibson - config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")) - settings = MeshRendererSettings(enable_shadow=False, msaa=False) - # Reduce texture scale for Mac. - if platform == "darwin": - settings.texture_scale = 0.5 - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - - scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=False) - s.import_scene(scene) - robot_config = config["robot"] - robot_config.pop("name") - turtlebot = Turtlebot(**robot_config) - s.import_robot(turtlebot) - - # Set a better viewing direction - s.viewer.initial_pos = [-2, 1.4, 1.2] - s.viewer.initial_view_direction = [0.6, -0.8, 0.1] - s.viewer.reset_viewer() - - # Objects to load: two tables, the first one is predefined model, the second, random for the same category - table_objects_to_load = { - "table_1": { - "category": "breakfast_table", - "model": "1b4e6f9dd22a8c628ef9d976af675b86", - "pos": (0.0, -0.2, 1.01), - "orn": (0, 0, 90), - }, - "table_2": { - "category": "breakfast_table", - "pos": (0.5, -2.0, 1.01), - "orn": (0, 0, 45), - }, - } - - # Load the specs of the object categories, e.g., common scaling factor - avg_category_spec = get_ig_avg_category_specs() - - scene_objects = {} - try: - for obj in table_objects_to_load.values(): - category = obj["category"] - if category in scene_objects: - scene_objects[category] += 1 - else: - scene_objects[category] = 1 - - # Get the path for all models of this category - category_path = get_ig_category_path(category) - - # If the specific model is given, we use it. 
If not, we select one randomly - if "model" in obj: - model = obj["model"] - else: - model = np.random.choice(os.listdir(category_path)) - - # Create the full path combining the path for all models and the name of the model - model_path = get_ig_model_path(category, model) - filename = os.path.join(model_path, model + ".urdf") - - # Create a unique name for the object instance - obj_name = "{}_{}".format(category, scene_objects[category]) - - # Create and import the object - simulator_obj = URDFObject( - filename, - name=obj_name, - category=category, - model_path=model_path, - avg_obj_dims=avg_category_spec.get(category), - fit_avg_dim_volume=True, - texture_randomization=False, - overwrite_inertial=True, - initial_pos=obj["pos"], - initial_orn=obj["orn"], - ) - s.import_object(simulator_obj) - - for _ in range(10): - obj = YCBObject("003_cracker_box") - s.import_object(obj) - obj.set_position_orientation(np.append(np.random.uniform(low=0, high=2, size=2), [1.8]), [0, 0, 0, 1]) - - if type_of_scene == 1: - for j in range(10): - logging.info("Resetting environment") - env.reset() - for i in range(100): - with Profiler("Environment action step"): - # action = env.action_space.sample() - state, reward, done, info = env.step([0.1, 0.1]) - if done: - logging.info("Episode finished after {} timesteps".format(i + 1)) - break - else: - for i in range(10000): - with Profiler("Simulator step"): - turtlebot.apply_action([0.1, 0.1]) - s.step() - rgb = s.renderer.render_robot_cameras(modes=("rgb")) - - finally: - if type_of_scene == 1: - env.close() - else: - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/objects/object_temperature_texture.py b/igibson/examples/objects/object_temperature_texture.py deleted file mode 100644 index 8e53d6172..000000000 --- a/igibson/examples/objects/object_temperature_texture.py +++ /dev/null @@ -1,57 +0,0 @@ -import logging -import os - -import igibson -from igibson import object_states -from igibson.objects.articulated_object import URDFObject -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.render.profiler import Profiler -from igibson.scenes.empty_scene import EmptyScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_ig_model_path -from igibson.utils.utils import parse_config - - -def main(): - """ - Demo of the texture change for an object - Loads an apple and increases manually its temperature to observe the texture change from frozen, to cooked, to burnt - Also shows how to change object-specific parameters such as the burning temperature - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_static_nav.yaml")) - settings = MeshRendererSettings(enable_shadow=True, msaa=False, optimized=True) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - - # Set a better viewing direction - s.viewer.initial_pos = [0, 0.4, 0.2] - s.viewer.initial_view_direction = [-0.4, -0.9, 0] - s.viewer.reset_viewer() - - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - s.import_scene(scene) - - # Load and place an apple - apple_dir = get_ig_model_path("apple", "00_0") - apple_urdf = os.path.join(get_ig_model_path("apple", "00_0"), "00_0.urdf") - apple = URDFObject(apple_urdf, name="apple", category="apple", model_path=apple_dir) - s.import_object(apple) - apple.set_position([0, 0, 
0.2]) - apple.states[object_states.Burnt].burn_temperature = 200 - - # Manually increase the temperature of the apple - for i in range(-10, 100): - temp = i * 5 - logging.info("Apple temperature: {} degrees Celsius".format(temp)) - apple.states[object_states.Temperature].set_value(temp) - logging.info("Frozen(Apple)? {}".format(apple.states[object_states.Frozen].get_value())) - logging.info("Cooked(Apple)? {}".format(apple.states[object_states.Cooked].get_value())) - logging.info("Burnt(Apple)? {}".format(apple.states[object_states.Burnt].get_value())) - for j in range(10): - # with Profiler("Simulator step"): - s.step() - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/objects/object_viewer.py b/igibson/examples/objects/object_viewer.py deleted file mode 100644 index fb36be264..000000000 --- a/igibson/examples/objects/object_viewer.py +++ /dev/null @@ -1,85 +0,0 @@ -import logging -import os - -import igibson -from igibson.objects.articulated_object import URDFObject -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.scenes.empty_scene import EmptyScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import ( - get_all_object_categories, - get_ig_avg_category_specs, - get_ig_model_path, - get_object_models_of_category, - get_scene_path, -) -from igibson.utils.utils import let_user_pick, parse_config - - -def main(): - """ - Minimal example to visualize all the models available in the iG dataset - It queries the user to select an object category and a model of that category, loads it and visualizes it - No physical simulation - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - - settings = MeshRendererSettings(enable_shadow=True, msaa=False, optimized=True) - s = Simulator( - mode="gui_interactive", image_width=512, image_height=512, vertical_fov=70, rendering_settings=settings - ) - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - # scene.load_object_categories(benchmark_names) - s.import_scene(scene) - s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0]) - - # Select a category to load - available_obj_categories = get_all_object_categories() - obj_category = available_obj_categories[let_user_pick(available_obj_categories) - 1] - - # Select a model to load - available_obj_models = get_object_models_of_category(obj_category) - obj_model = available_obj_models[let_user_pick(available_obj_models) - 1] - - logging.info("Visualizing category {}, model {}".format(obj_category, obj_model)) - - # Load the specs of the object categories, e.g., common scaling factor - avg_category_spec = get_ig_avg_category_specs() - - try: - # Create the full path combining the path for all models and the name of the model - model_path = get_ig_model_path(obj_category, obj_model) - filename = os.path.join(model_path, obj_model + ".urdf") - - # Create a unique name for the object instance - obj_name = "{}_{}".format(obj_category, 0) - - # Create and import the object - simulator_obj = URDFObject( - filename, - name=obj_name, - category=obj_category, - model_path=model_path, - avg_obj_dims=avg_category_spec.get(obj_category), - fit_avg_dim_volume=True, - texture_randomization=False, - overwrite_inertial=True, - initial_pos=[0.5, -0.5, 1.01], - ) - s.import_object(simulator_obj) - - # Set a better viewing direction - s.viewer.initial_pos = [2.0, 0, 1.6] - s.viewer.initial_view_direction = [-1, 0, 0] - s.viewer.reset_viewer() - - # 
Visualize object - while True: - s.viewer.update() - - finally: - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/observations/__init__.py b/igibson/examples/observations/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/igibson/examples/observations/generate_additional_visual_channels.py b/igibson/examples/observations/generate_additional_visual_channels.py deleted file mode 100644 index c451f2dc5..000000000 --- a/igibson/examples/observations/generate_additional_visual_channels.py +++ /dev/null @@ -1,76 +0,0 @@ -import logging -import os -from sys import platform - -import cv2 -import numpy as np -import yaml - -import igibson -from igibson.envs.igibson_env import iGibsonEnv -from igibson.render.profiler import Profiler -from igibson.utils.constants import MAX_CLASS_COUNT, MAX_INSTANCE_COUNT -from igibson.utils.vision_utils import randomize_colors, segmentation_to_rgb - - -def main(): - """ - Example of rendering additional sensor modalities - Loads Rs_int (interactive) with some objects and and renders depth, normals, semantic and instance segmentation - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - config_filename = os.path.join(igibson.example_config_path, "turtlebot_nav.yaml") - config_data = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) - # Only load a few objects - # config_data["load_object_categories"] = [ - # "breakfast_table", - # "carpet", - # "sofa", - # "bottom_cabinet", - # "sink", - # "stove", - # "fridge", - # ] - config_data["vertical_fov"] = 90 - config_data["output"] = ["task_obs", "rgb", "depth", "normal", "seg", "ins_seg"] - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - env = iGibsonEnv(config_file=config_data, mode="gui_interactive") - # Set a better viewing direction - env.simulator.viewer.initial_pos = [0, 1.1, 1.5] - env.simulator.viewer.initial_view_direction = [0, 1, 0.1] - env.simulator.viewer.reset_viewer() - - colors_ss = randomize_colors(MAX_CLASS_COUNT, bright=True) - colors_is = randomize_colors(MAX_INSTANCE_COUNT, bright=True) - - for j in range(100): - logging.info("Resetting environment") - env.reset() - for i in range(300): - with Profiler("Environment action step"): - action = env.action_space.sample() - state, reward, done, info = env.step([0.1, 0.1]) - - depth = state["depth"] - cv2.imshow("Depth", depth) - - normal = state["normal"] - cv2.imshow("Normals", cv2.cvtColor(normal, cv2.COLOR_RGB2BGR)) - - seg = state["seg"] - seg_int = np.round(seg[:, :, 0]).astype(np.int32) - seg_cv = segmentation_to_rgb(seg_int, MAX_CLASS_COUNT, colors=colors_ss) - cv2.imshow("Semantic Segmentation", seg_cv) - - ins_seg = state["ins_seg"] - ins_seg_int = np.round(ins_seg[:, :, 0]).astype(np.int32) - ins_seg_cv = segmentation_to_rgb(ins_seg_int, MAX_INSTANCE_COUNT, colors=colors_is) - cv2.imshow("Instance Segmentation", ins_seg_cv) - - env.close() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/observations/generate_colored_dense_pointcloud.py b/igibson/examples/observations/generate_colored_dense_pointcloud.py deleted file mode 100644 index 35042f478..000000000 --- a/igibson/examples/observations/generate_colored_dense_pointcloud.py +++ /dev/null @@ -1,70 +0,0 @@ -import logging -import os -from sys import platform - -import matplotlib.pyplot as plt -import numpy as np -import yaml -from mpl_toolkits.mplot3d import Axes3D - -import igibson -from igibson.envs.igibson_env import 
iGibsonEnv - - -def main(): - """ - Example of rendering and visualizing a single 3D dense pointcloud - Loads Rs (non interactive) and a robot and renders a dense panorama depth map from the robot's camera - It plots the point cloud with matplotlib, colored with the RGB values - It also generates semantic segmentation - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - - # Create iGibsonEnvironment with the Fetch rearrangement config - mode = "headless" - config = os.path.join(igibson.example_config_path, "fetch_rearrangement.yaml") - config_data = yaml.load(open(config, "r"), Loader=yaml.FullLoader) - # Reduce texture scale for Mac. - if platform == "darwin": - config_data["texture_scale"] = 0.5 - scene_id = "Rs_int" - nav_env = iGibsonEnv( - config_file=config_data, mode=mode, scene_id=scene_id, action_timestep=1.0 / 120.0, physics_timestep=1.0 / 120.0 - ) - - # Generate data #################################### - # First set the robot in a random point in the scene - point = nav_env.scene.get_random_point()[1] - nav_env.robots[0].set_position(point) - nav_env.simulator.sync() - - # Then generate RGB, semantic segmentation and 3D/depth, in panorama format - pano_rgb = nav_env.simulator.renderer.get_equi(mode="rgb", use_robot_camera=True) - pano_seg = nav_env.simulator.renderer.get_equi(mode="seg", use_robot_camera=True) - pano_3d = nav_env.simulator.renderer.get_equi(mode="3d", use_robot_camera=True) - depth = np.linalg.norm(pano_3d[:, :, :3], axis=2) - theta = -np.arange(-np.pi / 2, np.pi / 2, np.pi / 128.0)[:, None] - phi = np.arange(-np.pi, np.pi, 2 * np.pi / 256)[None, :] - # depth = depth / np.cos(theta) - x = np.cos(theta) * np.sin(phi) * depth - y = np.sin(theta) * depth - z = np.cos(theta) * np.cos(phi) * depth - - # Select only a few points - mask = np.random.uniform(size=(128, 256)) > 0.5 - - pts = (np.stack([x[mask], y[mask], z[mask]]).T).astype(np.float32) - color = (pano_rgb[mask][:, :3]).astype(np.float32) - label = (pano_seg[mask][:, 0]).astype(np.int32) - - assert len(pts) == len(label) - - # Create visualization: 3D points with RGB color - fig = plt.figure() - ax = Axes3D(fig) - ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], s=3, c=color[:, :3]) - plt.show() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/observations/generate_lidar_colored_pointcloud.py b/igibson/examples/observations/generate_lidar_colored_pointcloud.py deleted file mode 100644 index 602d1993f..000000000 --- a/igibson/examples/observations/generate_lidar_colored_pointcloud.py +++ /dev/null @@ -1,139 +0,0 @@ -import logging -import os -from sys import platform - -import matplotlib.pyplot as plt -import numpy as np -import yaml -from mpl_toolkits.mplot3d import Axes3D - -import igibson -from igibson.envs.igibson_env import iGibsonEnv - - -def get_lidar_sampling_pattern(): - lidar_vertical_low = -15 / 180.0 * np.pi - lidar_vertical_high = 15 / 180.0 * np.pi - lidar_vertical_n_beams = 16 - lidar_vertical_beams = np.arange( - lidar_vertical_low, - lidar_vertical_high + (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1), - (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1), - ) - - lidar_horizontal_low = -45 / 180.0 * np.pi - lidar_horizontal_high = 45 / 180.0 * np.pi - lidar_horizontal_n_beams = 468 - lidar_horizontal_beams = np.arange( - lidar_horizontal_low, - lidar_horizontal_high, - (lidar_horizontal_high - lidar_horizontal_low) / (lidar_horizontal_n_beams), - ) - - xx, yy = np.meshgrid(lidar_vertical_beams, 
lidar_horizontal_beams) - xx = xx.flatten() - yy = yy.flatten() - - height = 128 - - x_samples = (np.tan(xx) / np.cos(yy) * height // 2 + height // 2).astype(np.int) - y_samples = (np.tan(yy) * height // 2 + height // 2).astype(np.int) - - x_samples = x_samples.flatten() - y_samples = y_samples.flatten() - return x_samples, y_samples - - -x_samples, y_samples = get_lidar_sampling_pattern() - - -def generate_data_lidar(nav_env, num_samples=3): - - rgb_all = [] - lidar_all = [] - lidar_all_2 = [] - label_all = [] - - # Set initial point (not used) - point = nav_env.scene.get_random_point()[1] - - for _ in range(num_samples): - # Sample a new point - new_point = nav_env.scene.get_random_point()[1] - # If it is too far away, sample again - while np.linalg.norm(new_point - point) > 1: - new_point = nav_env.scene.get_random_point()[1] - - # Get vector of distance and change to (y, z, x) - delta_pos = new_point - point - delta_pos = np.array([delta_pos[1], delta_pos[2], delta_pos[0]]) - - # Set new robot's position - nav_env.robots[0].set_position(new_point) - - # Get observations (panorama RGB, 3D/Depth and semantic segmentation) - pano_rgb = nav_env.simulator.renderer.get_cube(mode="rgb", use_robot_camera=True) - pano_3d = nav_env.simulator.renderer.get_cube(mode="3d", use_robot_camera=True) - pano_seg = nav_env.simulator.renderer.get_cube(mode="seg", use_robot_camera=True) - - r3 = np.array( - [[np.cos(-np.pi / 2), 0, -np.sin(-np.pi / 2)], [0, 1, 0], [np.sin(-np.pi / 2), 0, np.cos(-np.pi / 2)]] - ) - transformatiom_matrix = np.eye(3) - - for i in range(4): - lidar_all.append(pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] - delta_pos[None, :]) - rgb_all.append(pano_rgb[i][:, :, :3][x_samples, y_samples]) - label_all.append(pano_seg[i][:, :, 0][x_samples, y_samples] * 255.0) - lidar_all_2.append( - pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] * 0.9 - delta_pos[None, :] - ) - transformatiom_matrix = r3.dot(transformatiom_matrix) - - lidar_all = np.concatenate(lidar_all, 0).astype(np.float32) - lidar_all_2 = np.concatenate(lidar_all_2, 0).astype(np.float32) - rgb_all = np.concatenate(rgb_all, 0).astype(np.float32) - label_all = np.concatenate(label_all, 0).astype(np.int32) - - assert len(label_all) == len(label_all) - - direction = lidar_all - lidar_all_2 - direction = direction / (np.linalg.norm(direction, axis=1)[:, None] + 1e-5) - - return lidar_all, direction, rgb_all, label_all - - -def main(): - """ - Example of rendering and visualizing a single lidar-like pointcloud - Loads Rs (non interactive) and a robot and renders a dense panorama depth map from the robot's camera - Samples the depth map with a lidar-like pattern - It plots the point cloud with matplotlib, colored with the RGB values - It also generates segmentation and "direction" - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - - # Create environment - mode = "headless" - scene_id = "Rs_int" - config = os.path.join(igibson.example_config_path, "fetch_rearrangement.yaml") - config_data = yaml.load(open(config, "r"), Loader=yaml.FullLoader) - # Reduce texture scale for Mac. 
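# A standalone numpy sketch of the spherical-to-Cartesian conversion used by the panorama
# point cloud example above: given an equirectangular depth map of shape (H, W), build the
# per-pixel elevation/azimuth grids and project to (x, y, z). The angle conventions follow
# that example; linspace is used so the grids always match the image size exactly.
import numpy as np


def equi_depth_to_points(depth):
    h, w = depth.shape
    theta = -np.linspace(-np.pi / 2, np.pi / 2, h, endpoint=False)[:, None]
    phi = np.linspace(-np.pi, np.pi, w, endpoint=False)[None, :]
    x = np.cos(theta) * np.sin(phi) * depth
    y = np.sin(theta) * depth
    z = np.cos(theta) * np.cos(phi) * depth
    return np.stack([x, y, z], axis=-1)  # (H, W, 3) points in the camera frame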
- if platform == "darwin": - config_data["texture_scale"] = 0.5 - nav_env = iGibsonEnv( - config_file=config_data, mode=mode, scene_id=scene_id, action_timestep=1.0 / 120.0, physics_timestep=1.0 / 120.0 - ) - - # Generate data - pts, direction, color, label = generate_data_lidar(nav_env) - - # Create visualization: 3D points with RGB color - fig = plt.figure() - ax = Axes3D(fig) - ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], s=3, c=color[:, :3]) - plt.show() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/observations/generate_topdown_semseg.py b/igibson/examples/observations/generate_topdown_semseg.py deleted file mode 100644 index 7e5c47a6c..000000000 --- a/igibson/examples/observations/generate_topdown_semseg.py +++ /dev/null @@ -1,60 +0,0 @@ -import colorsys -import logging - -import cv2 -import matplotlib.cm as cm -import matplotlib.pyplot as plt -import numpy as np - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene -from igibson.simulator import Simulator -from igibson.utils.constants import MAX_CLASS_COUNT, MAX_INSTANCE_COUNT -from igibson.utils.vision_utils import segmentation_to_rgb - - -def main(): - """ - Example of generating a topdown semantic segmentation map - Loads Rs_int (interactive) with all or some objects (code can be commented) - and and renders a semantic segmentation image top-down - This is also an example of how to load a scene without the ceiling to facilitate creating visualizations - Quit with q - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.01) - s = Simulator(mode="headless", image_width=512, image_height=512, device_idx=0, rendering_settings=settings) - scene = InteractiveIndoorScene( - "Rs_int", - texture_randomization=False, - object_randomization=False, - # load_object_categories=[ - # "breakfast_table", - # "carpet", - # "sofa", - # "bottom_cabinet", - # "sink", - # "stove", - # "fridge", - # ], - not_load_object_categories=["ceilings"], - ) - s.import_scene(scene) - camera_pose = np.array([0, 0, 6.0]) - view_direction = np.array([0, 0, -1.0]) - s.renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 1, 0]) - seg = s.renderer.render(modes=("seg"))[0] - seg_int = np.round(seg[:, :, 0] * MAX_CLASS_COUNT).astype(np.int32) - seg_color = segmentation_to_rgb(seg_int, MAX_CLASS_COUNT) - - while True: - cv2.imshow("Topdown Semantic Segmentation", seg_color) - q = cv2.waitKey(1) - if q == ord("q"): - break - - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/renderer/__init__.py b/igibson/examples/renderer/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/igibson/examples/renderer/mesh_renderer_pano_example.py b/igibson/examples/renderer/mesh_renderer_pano_example.py deleted file mode 100644 index 65f169d14..000000000 --- a/igibson/examples/renderer/mesh_renderer_pano_example.py +++ /dev/null @@ -1,100 +0,0 @@ -import logging -import os -import sys - -import cv2 -import numpy as np - -from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.render.profiler import Profiler -from igibson.utils.assets_utils import get_scene_path - - -def main(): - """ - Creates renderer and renders panorama images in Rs (no interactive). No physics. 
- The camera view can be controlled. - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - global _mouse_ix, _mouse_iy, down, view_direction - - # If a model is given, we load it, otherwise we load Rs mesh (non interactive) - if len(sys.argv) > 1: - model_path = sys.argv[1] - else: - model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj") - - # Create renderer object and load the scene model - settings = MeshRendererSettings(enable_pbr=False) - renderer = MeshRenderer(width=512, height=512, rendering_settings=settings) - renderer.load_object(model_path) - renderer.add_instance_group([0]) - - # Print some information about the loaded model - logging.info(renderer.visual_objects, renderer.instances) - logging.info(renderer.material_idx_to_material_instance_mapping, renderer.shape_material_idx) - - # Create a simple viewer with OpenCV and a keyboard navigation - px = 0 - py = 0.2 - camera_pose = np.array([px, py, 0.5]) - view_direction = np.array([0, -1, -0.3]) - renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1]) - renderer.set_fov(90) - _mouse_ix, _mouse_iy = -1, -1 - down = False - - # Define the function callback for OpenCV events on the window - def change_dir(event, x, y, flags, param): - global _mouse_ix, _mouse_iy, down, view_direction - if event == cv2.EVENT_LBUTTONDOWN: - _mouse_ix, _mouse_iy = x, y - down = True - if event == cv2.EVENT_MOUSEMOVE: - if down: - dx = (x - _mouse_ix) / 100.0 - dy = (y - _mouse_iy) / 100.0 - _mouse_ix = x - _mouse_iy = y - r1 = np.array([[np.cos(dy), 0, np.sin(dy)], [0, 1, 0], [-np.sin(dy), 0, np.cos(dy)]]) - r2 = np.array([[np.cos(-dx), -np.sin(-dx), 0], [np.sin(-dx), np.cos(-dx), 0], [0, 0, 1]]) - view_direction = r1.dot(r2).dot(view_direction) - elif event == cv2.EVENT_LBUTTONUP: - down = False - - cv2.namedWindow("Viewer") - cv2.setMouseCallback("Viewer", change_dir) - - cv2.namedWindow("Panorama Viewer") - - # Move camera and render normal and panorama images - while True: - with Profiler("Render"): - frame = renderer.render(modes=("rgb")) - # Actual panorama image stuff - img = renderer.get_equi() - - cv2.imshow("Viewer", cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR)) - cv2.imshow("Panorama Viewer", img) - - q = cv2.waitKey(1) - if q == ord("w"): - px += 0.01 - elif q == ord("s"): - px -= 0.01 - elif q == ord("a"): - py += 0.01 - elif q == ord("d"): - py -= 0.01 - elif q == ord("q"): - break - camera_pose = np.array([px, py, 0.5]) - renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1]) - - # Cleanup - renderer.release() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/robots/__init__.py b/igibson/examples/robots/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/igibson/examples/robots/differential_drive_control_example.py b/igibson/examples/robots/differential_drive_control_example.py deleted file mode 100644 index b9abc36e7..000000000 --- a/igibson/examples/robots/differential_drive_control_example.py +++ /dev/null @@ -1,95 +0,0 @@ -""" -This demo shows how to use keyboard to control a two-wheeled robot -""" -import argparse -import os -from types import SimpleNamespace - -import numpy as np -import pybullet as p -import pybullet_data - -import igibson -from igibson.objects.articulated_object import URDFObject -from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings -from igibson.robots import REGISTERED_ROBOTS, TwoWheelRobot -from igibson.scenes.empty_scene import EmptyScene -from 
igibson.simulator import Simulator -from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path -from igibson.utils.utils import parse_config - -parser = argparse.ArgumentParser() - -parser.add_argument( - "--robot", - type=str, - default="Turtlebot", - choices=list(REGISTERED_ROBOTS.keys()), - help="Robot to use. Note that the selected robot must support differential drive!", -) - -args = parser.parse_args() - - -def main(args): - rendering_settings = MeshRendererSettings( - optimized=True, - fullscreen=False, - enable_shadow=True, - enable_pbr=True, - msaa=True, - light_dimming_factor=1.0, - ) - s = Simulator( - mode="headless", use_pb_gui=True, rendering_settings=rendering_settings, image_height=512, image_width=512 - ) - - scene = EmptyScene() - s.scene = scene - scene.objects_by_id = {} - - s.import_scene(scene) - p.setAdditionalSearchPath(pybullet_data.getDataPath()) - - agent = REGISTERED_ROBOTS[args.robot](action_type="continuous") - - s.import_robot(agent) - - # Make sure robot is a two wheeled robot and using differential drive - assert isinstance(agent, TwoWheelRobot), "Robot must be a TwoWheelRobot!" - assert ( - agent.controller_config["base"]["name"] == "DifferentialDriveController" - ), "Robot must be using differential drive control for its base" - - agent.reset() - i = 0 - actions = [] - print("Press ctrl-q to quit") - while True: - i += 1 - action = np.zeros(agent.action_dim) - - # 0 - forward/backwards (good) (up/down) - # 1 - rotate robot base (good) (left/right) - - events = p.getKeyboardEvents() - if 65295 in events: - action[1] += -0.10 - if 65296 in events: - action[1] += 0.10 - if 65297 in events: - action[0] += 0.2 - if 65298 in events: - action[0] += -0.2 - if 65307 in events and 113 in events: - break - - agent.apply_action(action) - actions.append(action) - p.stepSimulation() - - s.disconnect() - - -if __name__ == "__main__": - main(args) diff --git a/igibson/examples/robots/ik_control_example.py b/igibson/examples/robots/ik_control_example.py deleted file mode 100644 index fae6353d2..000000000 --- a/igibson/examples/robots/ik_control_example.py +++ /dev/null @@ -1,182 +0,0 @@ -""" -This demo shows how to use keyboard to control a Fetch robot via IK -""" -import argparse -import os -from types import SimpleNamespace - -import numpy as np -import pybullet as p -import pybullet_data - -import igibson -from igibson.objects.articulated_object import URDFObject -from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings -from igibson.robots.fetch import Fetch -from igibson.scenes.empty_scene import EmptyScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_category_path, get_ig_model_path -from igibson.utils.utils import parse_config - - -def main(): - rendering_settings = MeshRendererSettings( - optimized=True, - fullscreen=False, - enable_shadow=True, - enable_pbr=True, - msaa=True, - light_dimming_factor=1.0, - ) - s = Simulator( - mode="headless", use_pb_gui=True, rendering_settings=rendering_settings, image_height=512, image_width=512 - ) - p.configureDebugVisualizer(p.COV_ENABLE_GUI, enable=0) - - scene = EmptyScene() - s.scene = scene - scene.objects_by_id = {} - - s.import_scene(scene) - p.setAdditionalSearchPath(pybullet_data.getDataPath()) - - config = parse_config(os.path.join(igibson.root_path, "examples", "configs", "behavior_onboard_sensing_fetch.yaml")) - vr_agent = Fetch(config) - s.import_robot(vr_agent) - - 
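# A small sketch of the arrow-key handling shared by the two teleoperation examples here:
# p.getKeyboardEvents() reports pybullet key codes (65297/65298 are up/down, 65295/65296
# are left/right), and the first two action dimensions drive the base (linear, then
# angular). The step sizes mirror the differential-drive example above.
import numpy as np
import pybullet as p


def keyboard_base_action(action_dim, lin_step=0.2, ang_step=0.1):
    action = np.zeros(action_dim)
    events = p.getKeyboardEvents()
    if 65297 in events:  # up arrow: drive forward
        action[0] += lin_step
    if 65298 in events:  # down arrow: drive backward
        action[0] -= lin_step
    if 65295 in events:  # left arrow: rotate one way
        action[1] -= ang_step
    if 65296 in events:  # right arrow: rotate the other way
        action[1] += ang_step
    return action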
table_objects_to_load = { - "table_1": { - "category": "breakfast_table", - "model": "1b4e6f9dd22a8c628ef9d976af675b86", - "pos": (1.500000, 0.000000, 0.35), - "orn": (0, 0, 1, 1), - }, - "coffee_cup_1": { - "category": "coffee_cup", - "model": "coffee_cup_000", - "pos": (1.5, 0.20, 0.8), - "orn": (0, 0, 0, 1), - }, - "plate_1": { - "category": "plate", - "model": "plate_000", - "pos": (1.5, -0.20, 0.8), - "orn": (0, 0, 0, 1), - }, - } - - avg_category_spec = get_ig_avg_category_specs() - - scene_objects = {} - for obj in table_objects_to_load.values(): - category = obj["category"] - if category in scene_objects: - scene_objects[category] += 1 - else: - scene_objects[category] = 1 - - category_path = get_ig_category_path(category) - if "model" in obj: - model = obj["model"] - else: - model = np.random.choice(os.listdir(category_path)) - model_path = get_ig_model_path(category, model) - filename = os.path.join(model_path, model + ".urdf") - obj_name = "{}_{}".format(category, scene_objects[category]) - - simulator_obj = URDFObject( - filename, - name=obj_name, - category=category, - model_path=model_path, - avg_obj_dims=avg_category_spec.get(category), - fit_avg_dim_volume=True, - texture_randomization=False, - overwrite_inertial=True, - initial_pos=obj["pos"], - initial_orn=[0, 0, 90], - ) - s.import_object(simulator_obj) - simulator_obj.set_orientation(obj["orn"]) - - vr_agent.reset() - i = 0 - actions = [] - print("Press ctrl-q to quit") - while True: - i += 1 - action = np.zeros(11) - action[-1] = -1.0 - - # 0 - forward/backwards (good) (up/down) - # 1 - rotate robot base (good) (left/right) - # 2 - turn head left/right (bad) (a/d) - # * after reaching end of rotation, causes: - # * arm to move, and then body to decrease height -0.1/0.1) - # 3 - turn head up/down (good) (r/f) - # 4 - move gripper towards/away from robot (good) (y/h) - # 5 - move gripper left/right (good) (j/l) - # 6 - move gripper down/up (bad) (i/k) - # * also moves body - # 7 - rotate gripper clockwise/counter clockwise in Z plane (u/o) - # * also moves body - # 8 - rotate gripper towards/out from body (x plane) ([/') - # * also moves body - # 9 - rotate gripper along plane of floor (y plane) - # * also moves body - # 10 - close/open gripper (good) (z/x) - - events = p.getKeyboardEvents() - if 65295 in events: - action[1] += -0.1 - if 65296 in events: - action[1] += 0.1 - if 65297 in events: - action[0] += 0.1 - if 65298 in events: - action[0] += -0.1 - if 97 in events: - action[2] += 0.1 - if 100 in events: - action[2] += -0.1 - if 114 in events: - action[3] += 0.1 - if 102 in events: - action[3] += -0.1 - if 122 in events: - action[10] += 0.1 - if 120 in events: - action[10] += -0.1 - if 105 in events: - action[6] += 0.1 - if 107 in events: - action[6] += -0.1 - if 106 in events: - action[5] += 0.1 - if 108 in events: - action[5] += -0.1 - if 117 in events: - action[7] += 0.1 - if 111 in events: - action[7] += -0.1 - if 121 in events: - action[4] += 0.1 - if 104 in events: - action[4] += -0.1 - if 91 in events: - action[8] += 0.1 - if 39 in events: - action[8] += -0.1 - if 65307 in events and 113 in events: - break - - action *= 5 - vr_agent.apply_action(action) - actions.append(action) - p.stepSimulation() - - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/robots/robot_example.py b/igibson/examples/robots/robot_example.py deleted file mode 100644 index b801de7c4..000000000 --- a/igibson/examples/robots/robot_example.py +++ /dev/null @@ -1,64 +0,0 @@ -import logging -import os 
- -import numpy as np - -import igibson -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.robots import REGISTERED_ROBOTS -from igibson.scenes.empty_scene import EmptyScene -from igibson.simulator import Simulator -from igibson.utils.utils import parse_config - - -def main(): - """ - Robot demo - Loads all robots in an empty scene, generate random actions - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - # Create empty scene - settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - s.import_scene(scene) - - # Create one instance of each robot aligned along the y axis - position = [0, 0, 0] - robots = {} - for robot_config_file in os.listdir(os.path.join(igibson.example_config_path, "robots")): - config = parse_config(os.path.join(igibson.example_config_path, "robots", robot_config_file)) - robot_config = config["robot"] - robot_name = robot_config.pop("name") - robot = REGISTERED_ROBOTS[robot_name](**robot_config) - s.import_robot(robot) - robot.set_position(position) - robot.reset() - robot.keep_still() - robots[robot_name] = (robot, position[1]) - logging.info("Loaded " + robot_name) - logging.info("Moving " + robot_name) - # Set viewer in front - s.viewer.initial_pos = [1.6, 0, 1.3] - s.viewer.initial_view_direction = [-0.7, 0, -0.7] - s.viewer.reset_viewer() - - for _ in range(100): # keep still for 10 seconds - s.step() - - for _ in range(30): - action = np.random.uniform(-1, 1, robot.action_dim) - robot.apply_action(action) - for _ in range(10): - s.step() - - robot.keep_still() - s.reload() - scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1]) - s.import_scene(scene) - - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/ros/__init__.py b/igibson/examples/ros/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/igibson/examples/ros/igibson-ros/turtlebot_rgbd.yaml b/igibson/examples/ros/igibson-ros/turtlebot_rgbd.yaml index fb60c9656..e36e9a69c 100644 --- a/igibson/examples/ros/igibson-ros/turtlebot_rgbd.yaml +++ b/igibson/examples/ros/igibson-ros/turtlebot_rgbd.yaml @@ -72,5 +72,5 @@ depth_noise_rate: 0.0 scan_noise_rate: 0.0 # visual objects -visible_target: true - +visual_object_at_initial_target_pos: true +target_visual_object_visible_to_agent: false diff --git a/igibson/examples/scenes/__init__.py b/igibson/examples/scenes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/igibson/examples/scenes/g_scene_selector.py b/igibson/examples/scenes/g_scene_selector.py deleted file mode 100644 index d9ded2338..000000000 --- a/igibson/examples/scenes/g_scene_selector.py +++ /dev/null @@ -1,57 +0,0 @@ -import logging -from sys import platform - -import numpy as np - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.render.profiler import Profiler -from igibson.scenes.gibson_indoor_scene import StaticIndoorScene -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_available_g_scenes -from igibson.utils.utils import let_user_pick - - -def main(): - """ - Prompts the user to select any available non-interactive scene and loads it. 
- Shows how to load directly scenes without the Environment interface - Shows how to sample points in the scene and how to compute geodesic distance and the shortest path - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - available_g_scenes = get_available_g_scenes() - scene_id = available_g_scenes[let_user_pick(available_g_scenes) - 1] - settings = MeshRendererSettings(enable_shadow=True, msaa=False) - # Reduce texture scale for Mac. - if platform == "darwin": - settings.texture_scale = 0.5 - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - - scene = StaticIndoorScene( - scene_id, - build_graph=True, - ) - s.import_scene(scene) - - # Shows how to sample points in the scene - np.random.seed(0) - for _ in range(10): - random_floor = scene.get_random_floor() - p1 = scene.get_random_point(random_floor)[1] - p2 = scene.get_random_point(random_floor)[1] - shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True) - logging.info("Random point 1: {}".format(p1)) - logging.info("Random point 2: {}".format(p2)) - logging.info("Geodesic distance between p1 and p2: {}".format(geodesic_distance)) - logging.info("Shortest path from p1 to p2: {}".format(shortest_path)) - - input("Press enter") - - while True: - with Profiler("Simulator step"): - s.step() - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/scenes/ig_scene_selector.py b/igibson/examples/scenes/ig_scene_selector.py deleted file mode 100644 index de46236f8..000000000 --- a/igibson/examples/scenes/ig_scene_selector.py +++ /dev/null @@ -1,60 +0,0 @@ -import logging -from sys import platform - -import numpy as np - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.render.profiler import Profiler -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene -from igibson.simulator import Simulator -from igibson.utils.assets_utils import get_available_ig_scenes -from igibson.utils.utils import let_user_pick - - -def main(): - """ - Prompts the user to select any available interactive scene and loads it. - Shows how to load directly scenes without the Environment interface - Shows how to sample points in the scene by room type and how to compute geodesic distance and the shortest path - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - available_ig_scenes = get_available_ig_scenes() - scene_id = available_ig_scenes[let_user_pick(available_ig_scenes) - 1] - settings = MeshRendererSettings(enable_shadow=True, msaa=False) - if platform == "darwin": - settings.texture_scale = 0.5 - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - - scene = InteractiveIndoorScene( - scene_id, - load_object_categories=[], # To load only the building. 
Fast - build_graph=True, - ) - s.import_scene(scene) - - # Shows how to sample points in the scene - np.random.seed(0) - for _ in range(10): - pt = scene.get_random_point_by_room_type("living_room")[1] - logging.info("Random point in living_room: {}".format(pt)) - - for _ in range(10): - random_floor = scene.get_random_floor() - p1 = scene.get_random_point(random_floor)[1] - p2 = scene.get_random_point(random_floor)[1] - shortest_path, geodesic_distance = scene.get_shortest_path(random_floor, p1[:2], p2[:2], entire_path=True) - logging.info("Random point 1: {}".format(p1)) - logging.info("Random point 2: {}".format(p2)) - logging.info("Geodesic distance between p1 and p2: {}".format(geodesic_distance)) - logging.info("Shortest path from p1 to p2: {}".format(shortest_path)) - - input("Press enter") - - while True: - with Profiler("Simulator step"): - s.step() - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/scenes/scene_object_rand_example.py b/igibson/examples/scenes/scene_object_rand_example.py deleted file mode 100644 index 61e8e75e0..000000000 --- a/igibson/examples/scenes/scene_object_rand_example.py +++ /dev/null @@ -1,63 +0,0 @@ -import logging -from sys import platform - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene -from igibson.simulator import Simulator - - -def main(): - """ - Example of randomization of the texture in a scene - Loads Rs_int (interactive) and randomizes the texture of the objects - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - settings = MeshRendererSettings(enable_shadow=True, msaa=False) - if platform == "darwin": - settings.texture_scale = 0.5 - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - # load_object_categories = None - load_object_categories = [ - "bottom_cabinet", - "sofa", - "rocking_chair", - "swivel_chair", - "folding_chair", - "desk", - "dining_table", - "coffee_table", - "breakfast_table", - ] # Change to None to load the full house - scene = InteractiveIndoorScene( - "Rs_int", - texture_randomization=False, - load_object_categories=load_object_categories, - object_randomization=True, - ) - s.import_scene(scene) - loaded = True - - for i in range(1, 10000): - if not loaded: - logging.info("Randomize object models") - - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - scene = InteractiveIndoorScene( - "Rs_int", - texture_randomization=False, - load_object_categories=load_object_categories, - object_randomization=True, - ) - s.import_scene(scene) - loaded = True - - if i % 1000 == 0: - s.reload() - loaded = False - - s.step() - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/scenes/scene_partial_loading_example.py b/igibson/examples/scenes/scene_partial_loading_example.py deleted file mode 100644 index 417f3c24b..000000000 --- a/igibson/examples/scenes/scene_partial_loading_example.py +++ /dev/null @@ -1,33 +0,0 @@ -import logging -from sys import platform - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene -from igibson.simulator import Simulator - - -def main(): - """ - Example of partial loading of a scene - Loads only some objects (by category) and in some room types - """ - logging.info("*" * 80 + "\nDescription:" 
+ main.__doc__ + "*" * 80) - settings = MeshRendererSettings(enable_shadow=True, msaa=False) - if platform == "darwin": - settings.texture_scale = 0.5 - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - scene = InteractiveIndoorScene( - "Rs_int", - texture_randomization=False, - object_randomization=False, - load_object_categories=["swivel_chair"], - load_room_types=["living_room"], - ) - s.import_scene(scene) - - while True: - s.step() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/scenes/scene_texture_rand_example.py b/igibson/examples/scenes/scene_texture_rand_example.py deleted file mode 100644 index 9dc8f1d95..000000000 --- a/igibson/examples/scenes/scene_texture_rand_example.py +++ /dev/null @@ -1,33 +0,0 @@ -import logging - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene -from igibson.simulator import Simulator - - -def main(): - """ - Example of randomization of the texture in a scene - Loads Rs_int (interactive) and randomizes the texture of the objects - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - settings = MeshRendererSettings(enable_shadow=False, msaa=False) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - scene = InteractiveIndoorScene( - "Rs_int", - load_object_categories=[], # To load only the building. Fast - texture_randomization=True, - object_randomization=False, - ) - s.import_scene(scene) - - for i in range(10000): - if i % 1000 == 0: - logging.info("Randomize texture") - scene.randomize_texture() - s.step() - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/scenes/stadium_example.py b/igibson/examples/scenes/stadium_example.py deleted file mode 100644 index 465c6a4fa..000000000 --- a/igibson/examples/scenes/stadium_example.py +++ /dev/null @@ -1,29 +0,0 @@ -import logging - -from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings -from igibson.render.profiler import Profiler -from igibson.scenes.stadium_scene import StadiumScene -from igibson.simulator import Simulator - - -def main(): - """ - Loads the Stadium scene - This scene is default in pybullet but is not really useful in iGibson - """ - logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80) - - settings = MeshRendererSettings(enable_shadow=False, msaa=False) - s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings) - - scene = StadiumScene() - s.import_scene(scene) - - while True: - with Profiler("Simulator step"): - s.step() - s.disconnect() - - -if __name__ == "__main__": - main() diff --git a/igibson/examples/vr/__init__.py b/igibson/examples/vr/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/igibson/examples/web_ui/__init__.py b/igibson/examples/web_ui/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/igibson/external/motion/motion_planners/rrt_connect.py b/igibson/external/motion/motion_planners/rrt_connect.py index e107678df..242f6514f 100644 --- a/igibson/external/motion/motion_planners/rrt_connect.py +++ b/igibson/external/motion/motion_planners/rrt_connect.py @@ -21,6 +21,7 @@ def rrt_connect(q1, q2, distance_fn, sample_fn, extend_fn, collision_fn, iterati tree1, tree2 = nodes1, nodes2 if swap: tree1, tree2 = nodes2, nodes1 + s = sample_fn() last1 = 
argmin(lambda n: distance_fn(n.config, s), tree1) @@ -40,7 +41,7 @@ def rrt_connect(q1, q2, distance_fn, sample_fn, extend_fn, collision_fn, iterati path1, path2 = last1.retrace(), last2.retrace() if swap: path1, path2 = path2, path1 - #print('{} iterations, {} nodes'.format(iteration, len(nodes1) + len(nodes2))) + print('{} iterations, {} nodes'.format(iteration, len(nodes1) + len(nodes2))) return configs(path1[:-1] + path2[::-1]) return None @@ -62,13 +63,13 @@ def birrt(q1, q2, distance, sample, extend, collision, if collision(q1) or collision(q2): return None path = direct_path(q1, q2, extend, collision) + if path is not None: return path for attempt in irange(restarts + 1): path = rrt_connect(q1, q2, distance, sample, extend, collision, iterations=iterations) if path is not None: - #print('{} attempts'.format(attempt)) if smooth is None: return path return smooth_path(path, extend, collision, iterations=smooth) diff --git a/igibson/external/motion/motion_planners/utils.py b/igibson/external/motion/motion_planners/utils.py index 4a69a07b9..df0b9378e 100644 --- a/igibson/external/motion/motion_planners/utils.py +++ b/igibson/external/motion/motion_planners/utils.py @@ -8,8 +8,8 @@ INF = float('inf') -RRT_ITERATIONS = 20 -RRT_RESTARTS = 2 +RRT_ITERATIONS = 500 #100 +RRT_RESTARTS = 10 #2 RRT_SMOOTHING = 20 diff --git a/igibson/external/pybullet_tools/utils.py b/igibson/external/pybullet_tools/utils.py index 7bcdecd00..e53b26a8d 100644 --- a/igibson/external/pybullet_tools/utils.py +++ b/igibson/external/pybullet_tools/utils.py @@ -2580,7 +2580,9 @@ def approximate_as_cylinder(body, **kwargs): #MAX_DISTANCE = 1e-3 -MAX_DISTANCE = 0. +MAX_DISTANCE = 0.1 + +MAX_DISTANCE_GRASP = 1e-3 def set_all_collisions(body_id, enabled=1): @@ -2598,7 +2600,7 @@ def set_coll_filter(target_id, body_id, body_links, enable): Sets collision filters for body - to enable or disable them :param target_id: physics body to enable/disable collisions with :param body_id: physics body to enable/disable collisions from - :param body_links: physics links on body to enable/disable collisions from + :param body_id: physics links on body to enable/disable collisions from :param enable: whether to enable/disable collisions """ target_link_idxs = [-1] + [i for i in range(p.getNumJoints(target_id))] diff --git a/igibson/metrics/agent.py b/igibson/metrics/agent.py index 276a0a69d..7619eca45 100644 --- a/igibson/metrics/agent.py +++ b/igibson/metrics/agent.py @@ -26,14 +26,14 @@ def __init__(self): self.clip = 0.2 - def step_callback(self, env, _): - robot = env.robots[0] + def step_callback(self, igbhvr_act_inst, _): + robot = igbhvr_act_inst.simulator.robots[0] agent_work = {part: 0 for part in ["left_hand", "right_hand", "body"]} agent_distance = {part: 0 for part in ["left_hand", "right_hand", "body"]} for part in ["left_hand", "right_hand", "body"]: self.next_state_cache[part] = { - "position": np.array(p.getBasePositionAndOrientation(robot.links[part].get_body_id())[0]), + "position": np.array(p.getBasePositionAndOrientation(robot.parts[part].body_id)[0]), } if not self.initialized: @@ -62,19 +62,18 @@ def step_callback(self, env, _): self.agent_pos[part].append(list(self.state_cache[part]["position"])) # Exclude agent teleports delta_pos = np.clip(delta_pos, -self.clip, self.clip) - if robot.links[part].movement_cid is None: + if robot.parts[part].movement_cid is None: force = 0 work = 0 else: - force = p.getConstraintState(robot.links[part].movement_cid) + force = p.getConstraintState(robot.parts[part].movement_cid) 
work = np.abs((delta_pos * np.linalg.norm(force))) distance = np.abs(delta_pos) if part in ["left_hand", "right_hand"]: - self.agent_local_pos[part].append(list(robot.links[part].get_local_position_orientation()[0])) + self.agent_local_pos[part].append(list(robot.parts[part].local_pos)) if part in ["left_hand", "right_hand"] and ( - len(p.getContactPoints(robot.links[part].get_body_id())) > 0 - or robot.links[part].object_in_hand is not None + len(p.getContactPoints(robot.parts[part].body_id)) > 0 or robot.parts[part].object_in_hand is not None ): self.delta_agent_grasp_distance[part].append(distance) self.agent_grasping[part].append(True) @@ -133,8 +132,8 @@ def __init__(self): self.clip = 0.2 - def step_callback(self, env, _): - robot = env.robots[0] + def step_callback(self, igbhvr_act_inst, _): + robot = igbhvr_act_inst.simulator.robots[0] agent_distance = {part: 0 for part in self.agent_pos} self.next_state_cache = { @@ -160,9 +159,9 @@ def step_callback(self, env, _): gripper_distance = np.abs(delta_pos) self.delta_agent_distance["gripper"].append(gripper_distance) - self.agent_local_pos["gripper"].append(robot.get_relative_eef_position().tolist()) + self.agent_local_pos["gripper"].append(list(robot.get_relative_eef_position())) - contacts = p.getContactPoints(bodyA=robot.get_body_id(), linkIndexA=robot.eef_link_id) + contacts = p.getContactPoints(bodyA=robot.robot_ids[0], linkIndexA=robot.eef_link_id) if len(contacts) > 0: self.delta_agent_grasp_distance["gripper"].append(gripper_distance) self.agent_grasping["gripper"].append(True) diff --git a/igibson/metrics/dataset.py b/igibson/metrics/dataset.py deleted file mode 100644 index d5a175979..000000000 --- a/igibson/metrics/dataset.py +++ /dev/null @@ -1,28 +0,0 @@ -from collections import defaultdict - -import numpy as np - -from igibson.metrics.metric_base import MetricBase - - -class DatasetMetric(MetricBase): - """ - Used to transform saved demos into (state, action) pairs - """ - - def __init__(self): - self.prev_state = None - self.state_history = defaultdict(list) - - def start_callback(self, env, _): - self.prev_state = env.get_state() - - def step_callback(self, env, log_reader): - # when called, action is at timestep t and the state is at timestep t-1, so we need to use the previous state - self.prev_state["action"] = log_reader.get_agent_action("vr_robot") - for key, value in self.prev_state.items(): - self.state_history[key].append(value) - self.prev_state = env.get_state() - - def gather_results(self): - return self.state_history diff --git a/igibson/metrics/disarrangement.py b/igibson/metrics/disarrangement.py index bcfa7dcfd..2b6726828 100644 --- a/igibson/metrics/disarrangement.py +++ b/igibson/metrics/disarrangement.py @@ -21,9 +21,9 @@ def __init__(self): self.delta_obj_disp_dict = {} self.int_obj_disp_dict = {} - def update_state_cache(self, env): + def update_state_cache(self, task): state_cache = {} - for obj_id, obj in env.scene.objects_by_name.items(): + for obj_id, obj in task.scene.objects_by_name.items(): if obj.category == "agent": continue if type(obj) == ObjectMultiplexer: @@ -87,9 +87,9 @@ def calculate_object_disarrangement(obj, prev_state_cache, cur_state_cache): raise Exception return obj_disarrangement - def step_callback(self, env, _): + def step_callback(self, igbhvr_act_inst, _): total_disarrangement = 0 - self.cur_state_cache = self.update_state_cache(env) + self.cur_state_cache = self.update_state_cache(igbhvr_act_inst) if not self.initialized: self.prev_state_cache = 
copy.deepcopy(self.cur_state_cache) @@ -144,7 +144,7 @@ def __init__(self): self.next_state_cache = {} @staticmethod - def cache_single_object(obj_id, obj, room_floors, env): + def cache_single_object(obj_id, obj, room_floors, task): obj_cache = {} for state_class, state in obj.states.items(): if not isinstance(state, BooleanState): @@ -160,7 +160,7 @@ def cache_single_object(obj_id, obj, room_floors, env): obj_cache[state_class] = relational_state_cache else: relational_state_cache = {} - for target_obj_id, target_obj in env.scene.objects_by_name.items(): + for target_obj_id, target_obj in task.scene.objects_by_name.items(): if obj_id == target_obj_id or target_obj.category == "agent": continue relational_state_cache[target_obj_id] = False @@ -173,36 +173,36 @@ def cache_single_object(obj_id, obj, room_floors, env): obj_cache[state_class] = relational_state_cache return obj_cache - def create_object_logical_state_cache(self, env): + def create_object_logical_state_cache(self, task): room_floors = { "room_floor_" + room_inst: RoomFloor( category="room_floor", name="room_floor_" + room_inst, - scene=env.scene, + scene=task.scene, room_instance=room_inst, - floor_obj=env.scene.objects_by_name["floors"], + floor_obj=task.scene.objects_by_name["floors"], ) - for room_inst in env.scene.room_ins_name_to_ins_id.keys() + for room_inst in task.scene.room_ins_name_to_ins_id.keys() } state_cache = {} - for obj_id, obj in env.scene.objects_by_name.items(): + for obj_id, obj in task.scene.objects_by_name.items(): if obj.category == "agent": continue state_cache[obj_id] = {} if type(obj) == ObjectMultiplexer: if obj.current_index == 0: - cache_base = self.cache_single_object(obj_id, obj._multiplexed_objects[0], room_floors, env) + cache_base = self.cache_single_object(obj_id, obj._multiplexed_objects[0], room_floors, task) cache_part_1 = None cache_part_2 = None else: cache_base = None cache_part_1 = self.cache_single_object( - obj_id, obj._multiplexed_objects[1].objects[0], room_floors, env.task + obj_id, obj._multiplexed_objects[1].objects[0], room_floors, task ) cache_part_2 = self.cache_single_object( - obj_id, obj._multiplexed_objects[1].objects[1], room_floors, env.task + obj_id, obj._multiplexed_objects[1].objects[1], room_floors, task ) state_cache[obj_id] = { "base_states": cache_base, @@ -211,7 +211,7 @@ def create_object_logical_state_cache(self, env): "type": "multiplexer", } else: - cache_base = self.cache_single_object(obj_id, obj, room_floors, env.task) + cache_base = self.cache_single_object(obj_id, obj, room_floors, task) state_cache[obj_id] = { "base_states": cache_base, "type": "standard", @@ -328,25 +328,25 @@ def compute_logical_disarrangement(self, object_state_cache_1, object_state_cach "total_states": total_states, } - def step_callback(self, env, _): - if not self.initialized and env.simulator.frame_count == SIMULATOR_SETTLE_TIME: - self.initial_state_cache = self.create_object_logical_state_cache(env) + def step_callback(self, igbhvr_act_inst, _): + if not self.initialized and igbhvr_act_inst.simulator.frame_count == SIMULATOR_SETTLE_TIME: + self.initial_state_cache = self.create_object_logical_state_cache(igbhvr_act_inst) self.initialized = True else: return - def end_callback(self, env, _): + def end_callback(self, igbhvr_act_inst, _): """ When pybullet sleeps objects, getContactPoints is no longer refreshed Setting collision groups on the agent (which happens when users first activate the agent) Wipes active collision groups. 
To get the logical disarrangement, we must wake up all objects in the scene This can only be done at the end of the scene so as to not affect determinism. """ - for obj in env.scene.objects_by_name.values(): + for obj in igbhvr_act_inst.scene.objects_by_name.values(): obj.force_wakeup() - env.simulator.step() + igbhvr_act_inst.simulator.step() - self.cur_state_cache = self.create_object_logical_state_cache(env) + self.cur_state_cache = self.create_object_logical_state_cache(igbhvr_act_inst) self.relative_logical_disarrangement = self.compute_logical_disarrangement( self.initial_state_cache, self.cur_state_cache diff --git a/igibson/metrics/gaze.py b/igibson/metrics/gaze.py index eaade67cb..b2caa1ba8 100644 --- a/igibson/metrics/gaze.py +++ b/igibson/metrics/gaze.py @@ -32,14 +32,16 @@ def __init__(self): self.obj_info_map = None self.gaze_marker = None - def start_callback(self, env, _): - self.name_to_category = {obj.name: obj.category for obj in env.scene.objects_by_name.values()} - self.task_obj_info = {obj.name: obj.category for obj in env.task.object_scope.values()} + def start_callback(self, igbhvr_act_inst, _): + self.name_to_category = { + obj.name: obj.category for obj in igbhvr_act_inst.simulator.scene.objects_by_name.values() + } + self.task_obj_info = {obj.name: obj.category for obj in igbhvr_act_inst.object_scope.values()} - self.gaze_marker = GazeVizMarker(env.simulator, 0.02) + self.gaze_marker = GazeVizMarker(igbhvr_act_inst.simulator, 0.02) - def step_callback(self, env, log_reader): - s = env.simulator + def step_callback(self, igbhvr_act_inst, log_reader): + s = igbhvr_act_inst.simulator eye_data = log_reader.get_vr_data().query("eye_data") if eye_data[0]: if self.target_obj in s.scene.objects_by_id: diff --git a/igibson/metrics/metric_base.py b/igibson/metrics/metric_base.py index 99038d2fd..14429144b 100644 --- a/igibson/metrics/metric_base.py +++ b/igibson/metrics/metric_base.py @@ -4,13 +4,13 @@ class MetricBase(with_metaclass(ABCMeta, object)): - def start_callback(self, env, log_reader): + def start_callback(self, igbhvr_act_inst, log_reader): pass - def step_callback(self, env, log_reader): + def step_callback(self, igbhvr_act_inst, log_reader): pass - def end_callback(self, env, log_reader): + def end_callback(self, igbhvr_act_inst, log_reader): pass @abstractmethod diff --git a/igibson/metrics/task.py b/igibson/metrics/task.py index c4d822a0b..e58cc9c9b 100644 --- a/igibson/metrics/task.py +++ b/igibson/metrics/task.py @@ -6,30 +6,31 @@ class TaskMetric(MetricBase): def __init__(self): self.satisfied_predicates = [] + self.q_score = [] self.timesteps = 0 - def start_callback(self, env, _): - self.render_timestep = env.simulator.render_timestep + def start_callback(self, igbhvr_act_instance, _): + self.render_timestep = igbhvr_act_instance.simulator.render_timestep - def step_callback(self, env, _): + def step_callback(self, igbhvr_act_inst, _): self.timesteps += 1 - self.satisfied_predicates.append(env.task.current_goal_status) - - def end_callback(self, env, _): + self.satisfied_predicates.append(igbhvr_act_inst.current_goal_status) candidate_q_score = [] - for option in env.task.ground_goal_state_options: + for option in igbhvr_act_inst.ground_goal_state_options: predicate_truth_values = [] for predicate in option: predicate_truth_values.append(predicate.evaluate()) candidate_q_score.append(np.mean(predicate_truth_values)) - self.final_q_score = np.max(candidate_q_score) + self.q_score.append(np.max(candidate_q_score)) def gather_results(self): return { 
"satisfied_predicates": { "timestep": self.satisfied_predicates, }, - "q_score": {"final": self.final_q_score}, + "q_score": { + "timestep": self.q_score, + }, "time": { "simulator_steps": self.timesteps, "simulator_time": self.timesteps * self.render_timestep, diff --git a/igibson/object_states/__init__.py b/igibson/object_states/__init__.py index 2feda22ff..95f6b2940 100644 --- a/igibson/object_states/__init__.py +++ b/igibson/object_states/__init__.py @@ -14,13 +14,7 @@ from igibson.object_states.on_top import OnTop from igibson.object_states.open import Open from igibson.object_states.pose import Pose -from igibson.object_states.robot_related_states import ( - InFOVOfRobot, - InHandOfRobot, - InReachOfRobot, - InSameRoomAsRobot, - ObjectsInFOVOfRobot, -) +from igibson.object_states.robot_related_states import InFOVOfRobot, InHandOfRobot, InReachOfRobot, InSameRoomAsRobot from igibson.object_states.room_states import ( ROOM_STATES, InsideRoomTypes, diff --git a/igibson/object_states/adjacency.py b/igibson/object_states/adjacency.py index ba25e98d7..5947fd051 100644 --- a/igibson/object_states/adjacency.py +++ b/igibson/object_states/adjacency.py @@ -8,12 +8,12 @@ _MAX_ITERATIONS = 10 _MAX_DISTANCE_VERTICAL = 5.0 -_MAX_DISTANCE_HORIZONTAL = 1.0 +_MAX_DISTANCE_HORIZONTAL = 4.0 #1.0 # How many 2-D bases to try during horizontal adjacency check. When 1, only the standard axes will be considered. # When 2, standard axes + 45 degree rotated will be considered. The tried axes will be equally spaced. The higher -# this number, the lower the possibility of false negatives in Inside and NextTo. -_HORIZONTAL_AXIS_COUNT = 5 +# this number, the lower the possibility of false negatives in Inside. +_HORIZONTAL_AXIS_COUNT = 3 AxisAdjacencyList = namedtuple("AxisAdjacencyList", ("positive_neighbors", "negative_neighbors")) @@ -29,7 +29,7 @@ def get_equidistant_coordinate_planes(n_planes): The samples will cover all 360 degrees (although rotational symmetry is assumed, e.g. if you take into account the axis index and the - positive/negative directions, only 1/4 of the possible coordinate (1 quadrant, np.pi / 2.0) + positive/negative directions, only 1/4 of the possible coordinate planes will be sampled: the ones where the first axis' positive direction is in the first quadrant). @@ -40,7 +40,7 @@ def get_equidistant_coordinate_planes(n_planes): corresponding to the axis. """ # Compute the positive directions of the 1st axis of each plane. 
- first_axis_angles = np.linspace(0, np.pi / 2, n_planes) + first_axis_angles = np.linspace(0, np.pi / 4, n_planes) first_axes = np.stack( [np.cos(first_axis_angles), np.sin(first_axis_angles), np.zeros_like(first_axis_angles)], axis=1 ) diff --git a/igibson/object_states/dirty.py b/igibson/object_states/dirty.py index 7ba5f7066..2afcf3dbf 100644 --- a/igibson/object_states/dirty.py +++ b/igibson/object_states/dirty.py @@ -1,7 +1,6 @@ from igibson.object_states import AABB from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState from igibson.objects.particles import Dust, Stain -from igibson.utils.constants import SemanticClass CLEAN_THRESHOLD = 0.5 FLOOR_CLEAN_THRESHOLD = 0.75 @@ -26,7 +25,7 @@ def __init__(self, obj): self.initial_dump = None def _initialize(self): - self.dirt = self.DIRT_CLASS(self.obj, initial_dump=self.initial_dump, class_id=SemanticClass.SCENE_OBJS) + self.dirt = self.DIRT_CLASS(self.obj, initial_dump=self.initial_dump) self.simulator.import_particle_system(self.dirt) def _get_value(self): diff --git a/igibson/object_states/factory.py b/igibson/object_states/factory.py index 1d8b3db67..af2f7ca8c 100644 --- a/igibson/object_states/factory.py +++ b/igibson/object_states/factory.py @@ -22,7 +22,6 @@ InsideRoomTypes, MaxTemperature, NextTo, - ObjectsInFOVOfRobot, OnFloor, OnTop, Open, @@ -50,7 +49,7 @@ "freezable": [Frozen], "heatSource": [HeatSourceOrSink], "openable": [Open], - "robot": ROOM_STATES + [ObjectsInFOVOfRobot], + "robot": ROOM_STATES, "sliceable": [Sliced], "slicer": [Slicer], "soakable": [Soaked], @@ -128,7 +127,7 @@ def get_object_state_instance(state_class, obj, params=None): return state_class(obj, **params) -def prepare_object_states(obj, abilities=None): +def prepare_object_states(obj, abilities=None, online=True): """ Prepare the state dictionary for an object by generating the appropriate object state instances. @@ -139,6 +138,9 @@ def prepare_object_states(obj, abilities=None): :param obj: The object to generate states for. :param abilities: dict in the form of {ability: {param: value}} containing object abilities and parameters. + :param online: Whether or not the states should be generated for an online + object. Offline mode involves using dummy objects rather than real state + objects. """ if abilities is None: abilities = {} diff --git a/igibson/object_states/heat_source_or_sink.py b/igibson/object_states/heat_source_or_sink.py index 6fe7dc973..1bbcb7a1a 100644 --- a/igibson/object_states/heat_source_or_sink.py +++ b/igibson/object_states/heat_source_or_sink.py @@ -13,7 +13,6 @@ # The name of the heat source link inside URDF files. 
from igibson.objects.visual_marker import VisualMarker -from igibson.utils.constants import SemanticClass _HEATING_ELEMENT_LINK_NAME = "heat_source" @@ -125,13 +124,9 @@ def _initialize(self): super(HeatSourceOrSink, self)._initialize() self.initialize_link_mixin() self.marker = VisualMarker( - visual_shape=p.GEOM_MESH, - filename=_HEATING_ELEMENT_MARKER_FILENAME, - scale=_HEATING_ELEMENT_MARKER_SCALE, - class_id=SemanticClass.SCENE_OBJS, - rendering_params={"shadow_caster": True}, + visual_shape=p.GEOM_MESH, filename=_HEATING_ELEMENT_MARKER_FILENAME, scale=_HEATING_ELEMENT_MARKER_SCALE ) - self.simulator.import_object(self.marker) + self.simulator.import_object(self.marker, use_pbr=False, use_pbr_mapping=False) self.marker.set_position([0, 0, -100]) def _update(self): diff --git a/igibson/object_states/inside.py b/igibson/object_states/inside.py index 39dd09874..ac1ffc39b 100644 --- a/igibson/object_states/inside.py +++ b/igibson/object_states/inside.py @@ -10,7 +10,6 @@ from igibson.object_states.object_state_base import BooleanState, RelativeObjectState from igibson.object_states.pose import Pose from igibson.object_states.utils import clear_cached_states, sample_kinematics -from igibson.utils.utils import restoreState class Inside(PositionalValidationMemoizedObjectStateMixin, KinematicsMixin, RelativeObjectState, BooleanState): @@ -36,7 +35,7 @@ def _set_value(self, other, new_value, use_ray_casting_method=False): if sampling_success: break else: - restoreState(state_id) + p.restoreState(state_id) p.removeState(state_id) diff --git a/igibson/object_states/on_floor.py b/igibson/object_states/on_floor.py index e4679c12c..c3adad462 100644 --- a/igibson/object_states/on_floor.py +++ b/igibson/object_states/on_floor.py @@ -8,7 +8,6 @@ from igibson.object_states.utils import clear_cached_states, get_center_extent, sample_kinematics # TODO: remove after split floors -from igibson.utils.utils import restoreState class RoomFloor(object): @@ -47,7 +46,7 @@ def _set_value(self, other, new_value): if sampling_success: break else: - restoreState(state_id) + p.restoreState(state_id) p.removeState(state_id) diff --git a/igibson/object_states/on_top.py b/igibson/object_states/on_top.py index 3ae783ae8..3a4a301c5 100644 --- a/igibson/object_states/on_top.py +++ b/igibson/object_states/on_top.py @@ -7,7 +7,6 @@ from igibson.object_states.object_state_base import BooleanState, RelativeObjectState from igibson.object_states.touching import Touching from igibson.object_states.utils import clear_cached_states, sample_kinematics -from igibson.utils.utils import restoreState class OnTop(PositionalValidationMemoizedObjectStateMixin, RelativeObjectState, BooleanState): @@ -33,7 +32,7 @@ def _set_value(self, other, new_value, use_ray_casting_method=False): if sampling_success: break else: - restoreState(state_id) + p.restoreState(state_id) p.removeState(state_id) @@ -50,7 +49,18 @@ def _get_value(self, other, use_ray_casting_method=False): # Then check vertical adjacency - it's the second least # costly. adjacency = self.obj.states[VerticalAdjacency].get_value() - return ( - other.get_body_id() in adjacency.negative_neighbors - and other.get_body_id() not in adjacency.positive_neighbors - ) + + # NOTE (njk): This below if-else logic is different than the + # original iGibson code. It's necessary because a shelf + # has multiple levels and thus, when an object is in one of the + # shelves, it is in both the negative_neighbors and + # positive_neighbors. 
+ if other.category == 'shelf': + result = other.get_body_id() in adjacency.negative_neighbors + else: + result = ( + other.get_body_id() in adjacency.negative_neighbors + and other.get_body_id() not in adjacency.positive_neighbors + ) + + return result diff --git a/igibson/object_states/open.py b/igibson/object_states/open.py index d8be9d175..8db1266e0 100644 --- a/igibson/object_states/open.py +++ b/igibson/object_states/open.py @@ -12,51 +12,31 @@ p.JOINT_REVOLUTE: 0.05, p.JOINT_PRISMATIC: 0.05, } -_OPEN_SAMPLING_ATTEMPTS = 5 _METADATA_FIELD = "openable_joint_ids" -_BOTH_SIDES_METADATA_FIELD = "openable_both_sides" -def _compute_joint_threshold(joint_info, joint_direction): +def _compute_joint_threshold(joint_info): # Convert fractional threshold to actual joint position. f = _JOINT_THRESHOLD_BY_TYPE[joint_info.jointType] - closed_end = joint_info.jointLowerLimit if joint_direction == 1 else joint_info.jointUpperLimit - open_end = joint_info.jointUpperLimit if joint_direction == 1 else joint_info.jointLowerLimit - threshold = (1 - f) * closed_end + f * open_end - return threshold, open_end, closed_end - - -def _is_in_range(position, threshold, range_end): - # Note that we are unable to do an actual range check here because the joint positions can actually go - # slightly further than the denoted joint limits. - if range_end > threshold: - return position > threshold - else: - return position < threshold + return (1 - f) * joint_info.jointLowerLimit + f * joint_info.jointUpperLimit def _get_relevant_joints(obj): if not hasattr(obj, "metadata"): - return None, None, None - - both_sides = obj.metadata[_BOTH_SIDES_METADATA_FIELD] if _BOTH_SIDES_METADATA_FIELD in obj.metadata else False + return None # Get joint IDs and names from metadata annotation. If object doesn't have the openable metadata, # we stop here and return Open=False. - if _METADATA_FIELD not in obj.metadata: + if _METADATA_FIELD not in obj.metadata or obj.metadata[_METADATA_FIELD] == []: print("No openable joint metadata found for object %s" % obj.name) - return None, None, None + return None joint_metadata = obj.metadata[_METADATA_FIELD] - - # The joint metadata is in the format of [(joint_id, joint_name), ...] for legacy annotations and - # [(joint_id, joint_name, joint_direction), ...] for direction-annotated objects. - joint_names = [m[1] for m in joint_metadata] - joint_directions = [m[2] if len(m) > 2 else 1 for m in joint_metadata] + _, joint_names = tuple(zip(*joint_metadata)) if not joint_names: print("No openable joint was listed in metadata for object %s" % obj.name) - return None, None, None + return None joint_names = set(obj.get_prefixed_joint_name(joint_name).encode(encoding="utf-8") for joint_name in joint_names) # Get joint infos and compute openness thresholds. @@ -74,99 +54,49 @@ def _get_relevant_joints(obj): ) assert all(joint_info.jointType in _JOINT_THRESHOLD_BY_TYPE.keys() for joint_info in relevant_joint_infos) - return both_sides, relevant_joint_infos, joint_directions + return relevant_joint_infos class Open(CachingEnabledObjectState, BooleanState): def _compute_value(self): - both_sides, relevant_joint_infos, joint_directions = _get_relevant_joints(self.obj) + relevant_joint_infos = _get_relevant_joints(self.obj) if not relevant_joint_infos: return False - # The "sides" variable is used to check open/closed state for objects whose joints can switch - # positions. 
These objects are annotated with the both_sides annotation and the idea is that switching - # the directions of *all* of the joints results in a similarly valid checkable state. As a result, to check - # each "side", we multiply *all* of the joint directions with the coefficient belonging to that side, which - # may be 1 or -1. - sides = [1, -1] if both_sides else [1] - - sides_openness = [] - for side in sides: - # Compute a boolean openness state for each joint by comparing positions to thresholds. - joint_ids = [joint_info.jointIndex for joint_info in relevant_joint_infos] - joint_thresholds = ( - _compute_joint_threshold(joint_info, joint_direction * side) - for joint_info, joint_direction in zip(relevant_joint_infos, joint_directions) - ) - joint_positions = utils.get_joint_positions(self.obj.get_body_id(), joint_ids) - joint_openness = ( - _is_in_range(position, threshold, open_end) - for position, (threshold, open_end, closed_end) in zip(joint_positions, joint_thresholds) - ) - - # Looking from this side, the object is open if any of its joints is open. - sides_openness.append(any(joint_openness)) - - # The object is open only if it's open from all of its sides. - return all(sides_openness) - - def _set_value(self, new_value, fully=False): - """ - Set the openness state, either to a random joint position satisfying the new value, or fully open/closed. - - @param new_value: bool value for the openness state of the object. - @param fully: whether the object should be fully opened/closed (e.g. all relevant joints to 0/1). - @return: bool indicating setter success. Failure may happen due to unannotated objects. - """ - both_sides, relevant_joint_infos, joint_directions = _get_relevant_joints(self.obj) + # Compute a boolean openness state for each joint by comparing positions to thresholds. + joint_ids = [joint_info.jointIndex for joint_info in relevant_joint_infos] + joint_thresholds = (_compute_joint_threshold(joint_info) for joint_info in relevant_joint_infos) + joint_positions = utils.get_joint_positions(self.obj.get_body_id(), joint_ids) + joint_openness = (position > threshold for position, threshold in zip(joint_positions, joint_thresholds)) + + # Return open if any joint is open, false otherwise. + return any(joint_openness) + + def _set_value(self, new_value): + relevant_joint_infos = _get_relevant_joints(self.obj) if not relevant_joint_infos: return False - # The "sides" variable is used to check open/closed state for objects whose joints can switch - # positions. These objects are annotated with the both_sides annotation and the idea is that switching - # the directions of *all* of the joints results in a similarly valid checkable state. We want our object to be - # open from *both* of the two sides, and I was too lazy to implement the logic for this without rejection - # sampling, so that's what we do. - # TODO: Implement a sampling method that's guaranteed to be correct, ditch the rejection method. - sides = [1, -1] if both_sides else [1] - - for _ in range(_OPEN_SAMPLING_ATTEMPTS): - side = random.choice(sides) - - # All joints are relevant if we are closing, but if we are opening let's sample a subset. - if new_value and not fully: - num_to_open = random.randint(1, len(relevant_joint_infos)) - relevant_joint_infos = random.sample(relevant_joint_infos, num_to_open) - - # Go through the relevant joints & set random positions. 
- for joint_info, joint_direction in zip(relevant_joint_infos, joint_directions): - threshold, open_end, closed_end = _compute_joint_threshold(joint_info, joint_direction * side) - - # Get the range - if new_value: - joint_range = (threshold, open_end) - else: - joint_range = (threshold, closed_end) - - if fully: - joint_pos = joint_range[1] - else: - # Convert the range to the format numpy accepts. - low = min(joint_range) - high = max(joint_range) - - # Sample a position. - joint_pos = random.uniform(low, high) - - # Save sampled position. - utils.set_joint_position(self.obj.get_body_id(), joint_info.jointIndex, joint_pos) - - # If we succeeded, return now. - if self._compute_value() == new_value: - return True - - # We exhausted our attempts and could not find a working sample. - return False + # All joints are relevant if we are closing, but if we are opening let's sample a subset. + if new_value: + num_to_open = random.randint(1, len(relevant_joint_infos)) + relevant_joint_infos = random.sample(relevant_joint_infos, num_to_open) + + # Go through the relevant joints & set random positions. + for joint_info in relevant_joint_infos: + joint_threshold = _compute_joint_threshold(joint_info) + + if new_value: + # Sample an open position. + joint_pos = random.uniform(joint_threshold, joint_info.jointUpperLimit) + else: + # Sample a closed position. + joint_pos = random.uniform(joint_info.jointLowerLimit, joint_threshold) + + # Save sampled position. + utils.set_joint_position(self.obj.get_body_id(), joint_info.jointIndex, joint_pos) + + return True # We don't need to do anything here - since the joints are saved, this should work directly. def _dump(self): diff --git a/igibson/object_states/robot_related_states.py b/igibson/object_states/robot_related_states.py index 69611ae5c..6cfefdaf8 100644 --- a/igibson/object_states/robot_related_states.py +++ b/igibson/object_states/robot_related_states.py @@ -33,7 +33,7 @@ def _compute_value(self): if not robot: return False - robot_pos = robot.links["body"].get_position() + robot_pos = robot.parts["body"].get_position() object_pos, _ = self.obj.states[Pose].get_value() return np.linalg.norm(object_pos - np.array(robot_pos)) < _IN_REACH_DISTANCE_THRESHOLD @@ -62,7 +62,7 @@ def _compute_value(self): if not scene or not hasattr(scene, "get_room_instance_by_point"): return False - robot_pos = robot.links["body"].get_position() + robot_pos = robot.parts["body"].get_position() robot_room = scene.get_room_instance_by_point(np.array(robot_pos[:2])) object_rooms = self.obj.states[InsideRoomTypes].get_value() @@ -99,16 +99,13 @@ def load(self, data): class InFOVOfRobot(CachingEnabledObjectState, BooleanState): - @staticmethod - def get_optional_dependencies(): - return CachingEnabledObjectState.get_optional_dependencies() + [ObjectsInFOVOfRobot] - def _compute_value(self): - robot = _get_behavior_robot(self.simulator) - if not robot: - return False - - return self.obj.get_body_id() in robot.links["body"].states[ObjectsInFOVOfRobot].get_value() + seg = self.simulator.renderer.render_robot_cameras(modes="ins_seg")[0][:, :, 0] + seg = np.round(seg * MAX_INSTANCE_COUNT) + main_body_instances = [ + inst.id for inst in self.obj.renderer_instances if inst.pybullet_uuid == self.obj.get_body_id() + ] + return np.any(np.isin(seg, main_body_instances)) def _set_value(self, new_value): raise NotImplementedError("InFOVOfRobot state currently does not support setting.") @@ -119,28 +116,3 @@ def _dump(self): def load(self, data): pass - - -class 
ObjectsInFOVOfRobot(CachingEnabledObjectState): - def __init__(self, *args, **kwargs): - super(CachingEnabledObjectState, self).__init__(*args, **kwargs) - - def _compute_value(self): - # Pass the FOV through the instance-to-body ID mapping. - seg = self.obj.parent.render_camera_image(modes="ins_seg")[0][:, :, 0] - seg = np.round(seg * MAX_INSTANCE_COUNT).astype(int) - body_ids = self.simulator.renderer.get_pb_ids_for_instance_ids(seg) - - # Pixels that don't contain an object are marked -1 but we don't want to include that - # as a body ID. - return set(np.unique(body_ids)) - {-1} - - def _set_value(self, new_value): - raise NotImplementedError("ObjectsInFOVOfRobot state currently does not support setting.") - - # Nothing to do here. - def _dump(self): - pass - - def load(self, data): - pass diff --git a/igibson/object_states/soaked.py b/igibson/object_states/soaked.py index e41e90b2c..ca74bf521 100644 --- a/igibson/object_states/soaked.py +++ b/igibson/object_states/soaked.py @@ -24,7 +24,7 @@ def _update(self): item.bodyUniqueIdB for item in list(self.obj.states[ContactBodies].get_value()) ) for particle in water_source_obj.states[WaterSource].water_stream.get_active_particles(): - if particle.get_body_id() in contacted_water_body_ids: + if particle.body_id in contacted_water_body_ids: self.value = True self.update_texture() diff --git a/igibson/object_states/toggle.py b/igibson/object_states/toggle.py index 892ccae40..3d678f66d 100644 --- a/igibson/object_states/toggle.py +++ b/igibson/object_states/toggle.py @@ -1,10 +1,11 @@ +import numpy as np import pybullet as p from igibson.object_states.link_based_state_mixin import LinkBasedStateMixin from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState from igibson.object_states.texture_change_state_mixin import TextureChangeStateMixin from igibson.objects.visual_marker import VisualMarker -from igibson.utils.constants import PyBulletSleepState, SemanticClass, SimulatorMode +from igibson.utils.constants import PyBulletSleepState from igibson.utils.utils import brighten_texture _TOGGLE_DISTANCE_THRESHOLD = 0.1 @@ -34,18 +35,8 @@ def get_state_link_name(): def _initialize(self): super(ToggledOn, self)._initialize() if self.initialize_link_mixin(): - self.visual_marker_on = VisualMarker( - rgba_color=[0, 1, 0, 0.5], - radius=_TOGGLE_BUTTON_RADIUS, - class_id=SemanticClass.SCENE_OBJS, - rendering_params={"use_pbr": True, "use_pbr_mapping": True}, - ) - self.visual_marker_off = VisualMarker( - rgba_color=[1, 0, 0, 0.5], - radius=_TOGGLE_BUTTON_RADIUS, - class_id=SemanticClass.SCENE_OBJS, - rendering_params={"use_pbr": True, "use_pbr_mapping": True}, - ) + self.visual_marker_on = VisualMarker(rgba_color=[0, 1, 0, 0.5], radius=_TOGGLE_BUTTON_RADIUS) + self.visual_marker_off = VisualMarker(rgba_color=[1, 0, 0, 0.5], radius=_TOGGLE_BUTTON_RADIUS) self.simulator.import_object(self.visual_marker_on) self.visual_marker_on.set_position(_TOGGLE_MARKER_OFF_POSITION) self.simulator.import_object(self.visual_marker_off) @@ -73,7 +64,7 @@ def _update(self): # swap two types of markers when toggled # when hud overlay is on, we show the toggle buttons, otherwise the buttons are hidden - if self.simulator.mode == SimulatorMode.VR: + if self.simulator.can_access_vr_context: hud_overlay_show_state = self.simulator.get_hud_show_state() else: hud_overlay_show_state = False diff --git a/igibson/object_states/under.py b/igibson/object_states/under.py index b669e350e..83da0f1ac 100644 --- a/igibson/object_states/under.py +++ 
b/igibson/object_states/under.py @@ -6,7 +6,6 @@ from igibson.object_states.memoization import PositionalValidationMemoizedObjectStateMixin from igibson.object_states.object_state_base import BooleanState, RelativeObjectState from igibson.object_states.utils import clear_cached_states, sample_kinematics -from igibson.utils.utils import restoreState class Under(PositionalValidationMemoizedObjectStateMixin, RelativeObjectState, BooleanState): @@ -30,7 +29,7 @@ def _set_value(self, other, new_value): if sampling_success: break else: - restoreState(state_id) + p.restoreState(state_id) p.removeState(state_id) diff --git a/igibson/object_states/utils.py b/igibson/object_states/utils.py index 5bed31f68..fa272305a 100644 --- a/igibson/object_states/utils.py +++ b/igibson/object_states/utils.py @@ -1,9 +1,6 @@ -import itertools - import cv2 import numpy as np import pybullet as p -import trimesh from IPython import embed from scipy.spatial.transform import Rotation as R @@ -18,8 +15,7 @@ ) from igibson.object_states.aabb import AABB from igibson.object_states.object_state_base import CachingEnabledObjectState -from igibson.utils import sampling_utils, utils -from igibson.utils.utils import restoreState +from igibson.utils import sampling_utils _ON_TOP_RAY_CASTING_SAMPLING_PARAMS = { # "hit_to_plane_threshold": 0.1, # TODO: Tune this parameter. @@ -65,27 +61,8 @@ def detect_collision(bodyA): return collision -def detect_collision(bodyA): - collision = False - for body_id in range(p.getNumBodies()): - if body_id == bodyA: - continue - closest_points = p.getClosestPoints(bodyA, body_id, distance=0.01) - if len(closest_points) > 0: - collision = True - break - return collision - - def sample_kinematics( - predicate, - objA, - objB, - binary_state, - use_ray_casting_method=False, - max_trials=100, - z_offset=0.05, - skip_falling=False, + predicate, objA, objB, binary_state, use_ray_casting_method=False, max_trials=100, z_offset=0.05, skip_falling=False ): if not binary_state: raise NotImplementedError() @@ -127,19 +104,16 @@ def sample_kinematics( else: assert False, "predicate is not onTop or inside: {}".format(predicate) - # Retrieve base CoM frame-aligned bounding box parallel to the XY plane - parallel_bbox_center, parallel_bbox_orn, parallel_bbox_extents, _ = objA.get_base_aligned_bounding_box( - xy_aligned=True - ) + aabb = get_aabb(objA.get_body_id()) + aabb_center, aabb_extent = get_aabb_center(aabb), get_aabb_extent(aabb) # TODO: Get this to work with non-URDFObject objects. 
sampling_results = sampling_utils.sample_cuboid_on_object( objB, num_samples=1, - cuboid_dimensions=parallel_bbox_extents, + cuboid_dimensions=aabb_extent, axis_probabilities=[0, 0, 1], refuse_downwards=True, - undo_padding=True, **params ) @@ -148,22 +122,18 @@ def sample_kinematics( sampling_success = sampled_vector is not None if sampling_success: - # Move the object from the original parallel bbox to the sampled bbox - parallel_bbox_rotation = R.from_quat(parallel_bbox_orn) + # Find the delta from the object's CoM to its AABB centroid + diff = old_pos - aabb_center + sample_rotation = R.from_quat(sampled_quaternion) original_rotation = R.from_quat(orientation) + combined_rotation = sample_rotation * original_rotation - # The additional orientation to be applied should be the delta orientation - # between the parallel bbox orientation and the sample orientation - additional_rotation = sample_rotation * parallel_bbox_rotation.inv() - combined_rotation = additional_rotation * original_rotation - orientation = combined_rotation.as_quat() + # Rotate it using the quaternion + rotated_diff = sample_rotation.apply(diff) - # The delta vector between the base CoM frame and the parallel bbox center needs to be rotated - # by the same additional orientation - diff = old_pos - parallel_bbox_center - rotated_diff = additional_rotation.apply(diff) pos = sampled_vector + rotated_diff + orientation = combined_rotation.as_quat() else: random_idx = np.random.randint(len(objB.supporting_surfaces[predicate].keys())) body_id, link_id = list(objB.supporting_surfaces[predicate].keys())[random_idx] @@ -210,14 +180,14 @@ def sample_kinematics( objA.set_position_orientation(pos, orientation) success = not detect_collision(objA.get_body_id()) # len(p.getContactPoints(objA.get_body_id())) == 0 - if igibson.debug_sampling: + if True: #igibson.debug_sampling: print("sample_kinematics", success) embed() if success: break else: - restoreState(state_id) + p.restoreState(state_id) p.removeState(state_id) @@ -231,3 +201,162 @@ def sample_kinematics( break return success + +def continuous_param_kinematics( + predicate, objA, objB, binary_state, continuous_params, use_ray_casting_method=False, max_trials=100, z_offset=0.05, skip_falling=False +): + if not binary_state: + raise NotImplementedError() + + sample_on_floor = predicate == "onFloor" + + if not use_ray_casting_method and not sample_on_floor and predicate not in objB.supporting_surfaces: + return False + + objA.force_wakeup() + if not sample_on_floor: + objB.force_wakeup() + + state_id = p.saveState() + for i in range(max_trials): + pos = None + if hasattr(objA, "orientations") and objA.orientations is not None: + orientation = objA.sample_orientation() + else: + orientation = [0, 0, 0, 1] + + # Orientation needs to be set for stable_z_on_aabb to work correctly + # Position needs to be set to be very far away because the object's + # original position might be blocking rays (use_ray_casting_method=True) + old_pos = np.array([200, 200, 200]) + objA.set_position_orientation(old_pos, orientation) + + # TODO HERE Add this continuous_param + + if sample_on_floor: + # TODO Warning Place on Floor does not check + #_, pos = objB.scene.get_random_point_by_room_instance(objB.room_instance) + pos = continuous_params + + if pos is not None: + pos[2] = stable_z_on_aabb(objA.get_body_id(), ([0, 0, pos[2]], [0, 0, pos[2]])) + else: + if use_ray_casting_method: + if predicate == "onTop": + params = _ON_TOP_RAY_CASTING_SAMPLING_PARAMS + elif predicate == "inside": + params = 
_INSIDE_RAY_CASTING_SAMPLING_PARAMS + else: + assert False, "predicate is not onTop or inside: {}".format(predicate) + + aabb = get_aabb(objA.get_body_id()) + aabb_center, aabb_extent = get_aabb_center(aabb), get_aabb_extent(aabb) + + # TODO: Get this to work with non-URDFObject objects. + # sampling_results = sampling_utils.sample_cuboid_on_object( + # objB, + # num_samples=1, + # cuboid_dimensions=aabb_extent, + # axis_probabilities=[0, 0, 1], + # refuse_downwards=True, + # **params + # ) + sampling_results = [[continuous_params[0:3], None, continuous_params[3:7]]] + + sampling_success = sampling_utils.check_sample_cuboid_on_object( + sampling_results, + objB, + num_samples=1, + cuboid_dimensions=aabb_extent, + axis_probabilities=[0, 0, 1], + refuse_downwards=True, + **params + ) + + sampled_vector = sampling_results[0][0] + sampled_quaternion = sampling_results[0][2] + + #sampling_success = sampled_vector is not None + if sampling_success: + # Find the delta from the object's CoM to its AABB centroid + diff = old_pos - aabb_center + + sample_rotation = R.from_quat(sampled_quaternion) + original_rotation = R.from_quat(orientation) + combined_rotation = sample_rotation * original_rotation + + # Rotate it using the quaternion + rotated_diff = sample_rotation.apply(diff) + + pos = sampled_vector + rotated_diff + orientation = combined_rotation.as_quat() + else: + random_idx = np.random.randint(len(objB.supporting_surfaces[predicate].keys())) + body_id, link_id = list(objB.supporting_surfaces[predicate].keys())[random_idx] + random_height_idx = np.random.randint(len(objB.supporting_surfaces[predicate][(body_id, link_id)])) + height, height_map = objB.supporting_surfaces[predicate][(body_id, link_id)][random_height_idx] + obj_half_size = np.max(objA.bounding_box) / 2 * 100 + obj_half_size_scaled = np.array([obj_half_size / objB.scale[1], obj_half_size / objB.scale[0]]) + obj_half_size_scaled = np.ceil(obj_half_size_scaled).astype(np.int) + height_map_eroded = cv2.erode(height_map, np.ones(obj_half_size_scaled, np.uint8)) + + valid_pos = np.array(height_map_eroded.nonzero()) + if valid_pos.shape[1] != 0: + random_pos_idx = np.random.randint(valid_pos.shape[1]) + random_pos = valid_pos[:, random_pos_idx] + y_map, x_map = random_pos + y = y_map / 100.0 - 2 + x = x_map / 100.0 - 2 + z = height + + pos = np.array([x, y, z]) + pos *= objB.scale + + # the supporting surface is defined w.r.t to the link frame, not + # the inertial frame + if link_id == -1: + link_pos, link_orn = p.getBasePositionAndOrientation(body_id) + dynamics_info = p.getDynamicsInfo(body_id, -1) + inertial_pos = dynamics_info[3] + inertial_orn = dynamics_info[4] + inv_inertial_pos, inv_inertial_orn = p.invertTransform(inertial_pos, inertial_orn) + link_pos, link_orn = p.multiplyTransforms( + link_pos, link_orn, inv_inertial_pos, inv_inertial_orn + ) + else: + link_pos, link_orn = get_link_pose(body_id, link_id) + pos = matrix_from_quat(link_orn).dot(pos) + np.array(link_pos) + z = stable_z_on_aabb(objA.get_body_id(), ([0, 0, pos[2]], [0, 0, pos[2]])) + pos[2] = z + + if pos is None: + success = False + else: + pos[2] += z_offset + objA.set_position_orientation(pos, orientation) + # TODO (wmcclinton) switch ^ with: iGibson/igibson/utils/behavior_robot_planning_utils.py -> plan_hand_motion_br + # May need to get the transform of the obj relative to the hand and then use that to get the hand position and orientation after the object is placed + success = not detect_collision(objA.get_body_id()) # 
len(p.getContactPoints(objA.get_body_id())) == 0 + + if igibson.debug_sampling: + print("sample_kinematics", success) + embed() + + if success: + break + else: + p.restoreState(state_id) + + if success and not skip_falling: + objA.set_position_orientation(pos, orientation) + # Let it fall for 0.2 second + physics_timestep = p.getPhysicsEngineParameters()["fixedTimeStep"] + for _ in range(int(0.2 / physics_timestep)): + p.stepSimulation() + if len(p.getContactPoints(bodyA=objA.get_body_id())) > 0: + break + + p.restoreState(state_id) + p.removeState(state_id) + + return success \ No newline at end of file diff --git a/igibson/object_states/water_source.py b/igibson/object_states/water_source.py index 2efd8eebf..6332ca244 100644 --- a/igibson/object_states/water_source.py +++ b/igibson/object_states/water_source.py @@ -5,7 +5,6 @@ from igibson.object_states.object_state_base import AbsoluteObjectState from igibson.object_states.toggle import ToggledOn from igibson.objects.particles import WaterStream -from igibson.utils.constants import SemanticClass _WATER_SOURCE_LINK_NAME = "water_source" @@ -42,9 +41,7 @@ def _initialize(self): return water_source_position = list(np.array(water_source_position) + _OFFSET_FROM_LINK) - self.water_stream = WaterStream( - water_source_position, num=_NUM_DROPS, initial_dump=self.initial_dump, class_id=SemanticClass.SCENE_OBJS - ) + self.water_stream = WaterStream(water_source_position, num=_NUM_DROPS, initial_dump=self.initial_dump) self.simulator.import_particle_system(self.water_stream) del self.initial_dump @@ -65,7 +62,7 @@ def _update(self): # water reusing logic contacted_water_body_ids = set(item.bodyUniqueIdB for item in list(self.obj.states[ContactBodies].get_value())) for particle in self.water_stream.get_active_particles(): - if particle.get_body_id() in contacted_water_body_ids: + if particle.body_id in contacted_water_body_ids: self.water_stream.stash_particle(particle) def _set_value(self, new_value): diff --git a/igibson/objects/articulated_object.py b/igibson/objects/articulated_object.py index 3b84c58b2..dc9937bce 100644 --- a/igibson/objects/articulated_object.py +++ b/igibson/objects/articulated_object.py @@ -1,4 +1,3 @@ -import itertools import json import logging import math @@ -9,32 +8,24 @@ import xml.etree.ElementTree as ET import cv2 -import networkx as nx import numpy as np import pybullet as p import trimesh -from scipy.spatial.transform import Rotation import igibson from igibson.external.pybullet_tools.utils import ( - get_all_links, - get_center_extent, get_joint_info, get_joints, - get_link_name, - get_link_state, link_from_name, matrix_from_quat, quat_from_matrix, set_joint_position, ) +from igibson.object_states.factory import prepare_object_states from igibson.object_states.texture_change_state_mixin import TextureChangeStateMixin -from igibson.objects.object_base import NonRobotObject, SingleBodyObject +from igibson.object_states.utils import clear_cached_states from igibson.objects.stateful_object import StatefulObject from igibson.render.mesh_renderer.materials import ProceduralMaterial, RandomizedMaterial -from igibson.utils import utils -from igibson.utils.constants import SemanticClass -from igibson.utils.semantics_utils import CLASS_NAME_TO_CLASS_ID from igibson.utils.urdf_utils import add_fixed_link, get_base_link_name, round_up, save_urdfs_without_floating_joints from igibson.utils.utils import get_transform_from_xyz_rpy, quatXYZWFromRotMat, rotate_vector_3d @@ -48,19 +39,19 @@ OBJECT_TAXONOMY = None -class 
ArticulatedObject(StatefulObject, SingleBodyObject): +class ArticulatedObject(StatefulObject): """ Articulated objects are defined in URDF files. They are passive (no motors). """ - def __init__(self, filename, scale=1, merge_fixed_links=True, **kwargs): - super(ArticulatedObject, self).__init__(**kwargs) + def __init__(self, filename, scale=1, merge_fixed_links=True): + super(ArticulatedObject, self).__init__() self.filename = filename self.scale = scale self.merge_fixed_links = merge_fixed_links - def _load(self, simulator): + def _load(self): """ Load the object into pybullet """ @@ -71,18 +62,39 @@ def _load(self, simulator): body_id = p.loadURDF(self.filename, globalScaling=self.scale, flags=flags) self.mass = p.getDynamicsInfo(body_id, -1)[0] + self.body_id = body_id + self.create_link_name_to_vm_map(body_id) + return body_id - simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params) - - return [body_id] + def create_link_name_to_vm_map(self, body_id): + self.link_name_to_vm = [] + link_name_to_vm_urdf = {} + for visual_shape in p.getVisualShapeData(body_id): + id, link_id, type, dimensions, filename, rel_pos, rel_orn, color = visual_shape[:8] + try: + if link_id == -1: + link_name = p.getBodyInfo(id)[0].decode("utf-8") + else: + link_name = p.getJointInfo(id, link_id)[12].decode("utf-8") + if not link_name in link_name_to_vm_urdf: + link_name_to_vm_urdf[link_name] = [] + else: + raise ValueError("link name clashing") + link_name_to_vm_urdf[link_name].append(filename.decode("utf-8")) + except: + pass + self.link_name_to_vm = [link_name_to_vm_urdf] def force_wakeup(self): """ Force wakeup sleeping objects """ - for joint_id in range(p.getNumJoints(self.get_body_id())): - p.changeDynamics(self.get_body_id(), joint_id, activationState=p.ACTIVATION_STATE_WAKE_UP) - p.changeDynamics(self.get_body_id(), -1, activationState=p.ACTIVATION_STATE_WAKE_UP) + for joint_id in range(p.getNumJoints(self.body_id)): + p.changeDynamics(self.body_id, joint_id, activationState=p.ACTIVATION_STATE_WAKE_UP) + p.changeDynamics(self.body_id, -1, activationState=p.ACTIVATION_STATE_WAKE_UP) + + def get_body_id(self): + return self.body_id class RBOObject(ArticulatedObject): @@ -96,7 +108,7 @@ def __init__(self, name, scale=1): super(RBOObject, self).__init__(filename, scale) -class URDFObject(StatefulObject, NonRobotObject): +class URDFObject(StatefulObject): """ URDFObjects are instantiated from a URDF file. They can be composed of one or more links and joints. They should be passive. We use this class to @@ -127,9 +139,6 @@ def __init__( joint_positions=None, merge_fixed_links=True, ignore_visual_shape=False, - class_id=None, - rendering_params=None, - **kwargs ): """ :param filename: urdf file path of that object model @@ -152,22 +161,9 @@ def __init__( :param visualize_primitives: whether to render geometric primitives :param joint_positions: Joint positions, keyed by body index and joint name, in the form of List[Dict[name, position]] - :param class_id: Class ID to override default class ID with (default will be constructed using category) - :param rendering_params: Rendering params to override default category-based rendering params. 
""" - # Load abilities from taxonomy if needed & possible - if abilities is None: - if OBJECT_TAXONOMY is not None: - taxonomy_class = OBJECT_TAXONOMY.get_class_name_from_igibson_category(category) - if taxonomy_class is not None: - abilities = OBJECT_TAXONOMY.get_abilities(taxonomy_class) - else: - abilities = {} - else: - abilities = {} - assert isinstance(abilities, dict), "Object abilities must be in dictionary form." + super(URDFObject, self).__init__() - # Save important arguments. self.name = name self.category = category self.in_rooms = in_rooms @@ -183,20 +179,19 @@ def __init__( self.room_floor = None self.ignore_visual_shape = ignore_visual_shape - # Update rendering and class parameters. - final_rendering_params = {} - if self.category in ["walls", "floors", "ceilings"]: - final_rendering_params["use_pbr"] = False - final_rendering_params["use_pbr_mapping"] = False - if self.category == "ceilings": - final_rendering_params["shadow_caster"] = False - if rendering_params: - final_rendering_params.update(rendering_params) - if class_id is None: - class_id = CLASS_NAME_TO_CLASS_ID.get(self.category, SemanticClass.SCENE_OBJS) - super(URDFObject, self).__init__( - abilities=abilities, class_id=class_id, rendering_params=final_rendering_params - ) + # Load abilities from taxonomy if needed & possible + if abilities is None: + if OBJECT_TAXONOMY is not None: + taxonomy_class = OBJECT_TAXONOMY.get_class_name_from_igibson_category(self.category) + if taxonomy_class is not None: + abilities = OBJECT_TAXONOMY.get_abilities(taxonomy_class) + else: + abilities = {} + else: + abilities = {} + + assert isinstance(abilities, dict), "Object abilities must be in dictionary form." + self.abilities = abilities # Friction for all prismatic and revolute joints if joint_friction is not None: @@ -221,7 +216,7 @@ def __init__( self.is_fixed = [] self.main_body = -1 - logging.info("Category " + self.category) + # logging.info("Category " + self.category) self.filename = filename dirname = os.path.dirname(filename) urdf = os.path.basename(filename) @@ -230,7 +225,7 @@ def __init__( if os.path.exists(simplified_urdf): self.filename = simplified_urdf filename = simplified_urdf - logging.info("Loading the following URDF template " + filename) + # logging.info("Loading the following URDF template " + filename) self.object_tree = ET.parse(filename) # Parse the URDF if not visualize_primitives: @@ -310,7 +305,7 @@ def __init__( if self.bounding_box is None: self.overwrite_inertial = False - logging.info("Scale: " + np.array2string(scale)) + # logging.info("Scale: " + np.array2string(scale)) # We need to know where the base_link origin is wrt. the bounding box # center. 
That allows us to place the model correctly since the joint @@ -320,7 +315,6 @@ def __init__( self.avg_obj_dims = avg_obj_dims - self.base_link_name = get_base_link_name(self.object_tree) self.rename_urdf() self.meta_links = {} @@ -329,8 +323,8 @@ def __init__( self.scale_object() self.compute_object_pose() self.remove_floating_joints(self.scene_instance_folder) - self.prepare_link_based_bounding_boxes() + prepare_object_states(self, abilities, online=True) self.prepare_visual_mesh_to_material() def set_ignore_visual_shape(self, value): @@ -460,40 +454,49 @@ def sample_orientation(self): def get_prefixed_joint_name(self, name): return self.name + "_" + name - def get_prefixed_link_name(self, name): - if name == "world": - return name - elif name == self.base_link_name: - # The base_link get renamed as the link tag indicates - # Just change the name of the base link in the embedded urdf - return self.name - else: - # The other links get also renamed to add the name of the link tag as prefix - # This allows us to load several instances of the same object - return self.name + "_" + name - def rename_urdf(self): """ Helper function that renames the file paths in the object urdf from relative paths to absolute paths """ + base_link_name = get_base_link_name(self.object_tree) # Change the links of the added object to adapt to the given name for link_emb in self.object_tree.iter("link"): - link_emb.attrib["name"] = self.get_prefixed_link_name(link_emb.attrib["name"]) + # If the original urdf already contains world link, do not rename + if link_emb.attrib["name"] == "world": + pass + elif link_emb.attrib["name"] == base_link_name: + # The base_link get renamed as the link tag indicates + # Just change the name of the base link in the embedded urdf + link_emb.attrib["name"] = self.name + else: + # The other links get also renamed to add the name of the link tag as prefix + # This allows us to load several instances of the same object + link_emb.attrib["name"] = self.name + "_" + link_emb.attrib["name"] # Change the joints of the added object to adapt them to the given name for joint_emb in self.object_tree.iter("joint"): # We change the joint name joint_emb.attrib["name"] = self.get_prefixed_joint_name(joint_emb.attrib["name"]) - # We change the child link names for child_emb in joint_emb.findall("child"): - child_emb.attrib["link"] = self.get_prefixed_link_name(child_emb.attrib["link"]) - + # If the original urdf already contains world link, do not rename + if child_emb.attrib["link"] == "world": + pass + elif child_emb.attrib["link"] == base_link_name: + child_emb.attrib["link"] = self.name + else: + child_emb.attrib["link"] = self.name + "_" + child_emb.attrib["link"] # and the parent link names for parent_emb in joint_emb.findall("parent"): - parent_emb.attrib["link"] = self.get_prefixed_link_name(parent_emb.attrib["link"]) + # If the original urdf already contains world link, do not rename + if parent_emb.attrib["link"] == "world": + pass + elif parent_emb.attrib["link"] == base_link_name: + parent_emb.attrib["link"] = self.name + else: + parent_emb.attrib["link"] = self.name + "_" + parent_emb.attrib["link"] def scale_object(self): """ @@ -506,16 +509,16 @@ def scale_object(self): # First, define the scale in each link reference frame # and apply it to the joint values - base_link_name = self.get_prefixed_link_name(self.base_link_name) - self.scales_in_link_frame = {base_link_name: self.scale} + base_link_name = get_base_link_name(self.object_tree) + scales_in_lf = {base_link_name: 
self.scale} all_processed = False while not all_processed: all_processed = True for joint in self.object_tree.iter("joint"): parent_link_name = joint.find("parent").attrib["link"] child_link_name = joint.find("child").attrib["link"] - if parent_link_name in self.scales_in_link_frame and child_link_name not in self.scales_in_link_frame: - scale_in_parent_lf = self.scales_in_link_frame[parent_link_name] + if parent_link_name in scales_in_lf and child_link_name not in scales_in_lf: + scale_in_parent_lf = scales_in_lf[parent_link_name] # The location of the joint frame is scaled using the scale in the parent frame for origin in joint.iter("origin"): current_origin_xyz = np.array([float(val) for val in origin.attrib["xyz"].split(" ")]) @@ -548,7 +551,7 @@ def scale_object(self): # print("Adding: ", joint.find("child").attrib["link"]) - self.scales_in_link_frame[joint.find("child").attrib["link"]] = scale_in_child_lf + scales_in_lf[joint.find("child").attrib["link"]] = scale_in_child_lf # The axis of the joint is defined in the joint frame, we scale it after applying the rotation for axis in joint.iter("axis"): @@ -682,7 +685,7 @@ def scale_object(self): inertia.attrib["iyz"] = str(0.0) inertia.attrib["izz"] = str(0.0) - scale_in_lf = self.scales_in_link_frame[link.attrib["name"]] + scale_in_lf = scales_in_lf[link.attrib["name"]] # Apply the scale to all mesh elements within the link (original scale and origin) for mesh in link.iter("mesh"): if "scale" in mesh.attrib: @@ -783,7 +786,7 @@ def create_link_name_vm_mapping(self): link_name_to_vm_urdf = {} sub_urdf_tree = ET.parse(self.urdf_paths[i]) - links = sub_urdf_tree.findall("link") + links = sub_urdf_tree.findall(".//link") for link in links: name = link.attrib["name"] if name in link_name_to_vm_urdf: @@ -791,27 +794,6 @@ def create_link_name_vm_mapping(self): link_name_to_vm_urdf[name] = [] for visual_mesh in link.findall("visual/geometry/mesh"): link_name_to_vm_urdf[name].append(visual_mesh.attrib["filename"]) - - if self.merge_fixed_links: - # Add visual meshes of the child link to the parent link for fixed joints because they will be merged - # by pybullet after loading - vms_before_merging = set([item for _, vms in link_name_to_vm_urdf.items() for item in vms]) - directed_graph = nx.DiGraph() - child_to_parent = dict() - for joint in sub_urdf_tree.findall("joint"): - if joint.attrib["type"] == "fixed": - child_link_name = joint.find("child").attrib["link"] - parent_link_name = joint.find("parent").attrib["link"] - directed_graph.add_edge(child_link_name, parent_link_name) - child_to_parent[child_link_name] = parent_link_name - for child_link_name in list(nx.algorithms.topological_sort(directed_graph)): - if child_link_name in child_to_parent: - parent_link_name = child_to_parent[child_link_name] - link_name_to_vm_urdf[parent_link_name].extend(link_name_to_vm_urdf[child_link_name]) - del link_name_to_vm_urdf[child_link_name] - vms_after_merging = set([item for _, vms in link_name_to_vm_urdf.items() for item in vms]) - assert vms_before_merging == vms_after_merging - self.link_name_to_vm.append(link_name_to_vm_urdf) def randomize_texture(self): @@ -922,7 +904,7 @@ def prepare_procedural_texture(self): self.procedural_material = procedural_material - def _load(self, simulator): + def _load(self): """ Load the object into pybullet and set it to the correct pose """ @@ -934,14 +916,14 @@ def _load(self, simulator): flags |= p.URDF_IGNORE_VISUAL_SHAPES for idx in range(len(self.urdf_paths)): - logging.info("Loading " + self.urdf_paths[idx]) + 
# logging.info("Loading " + self.urdf_paths[idx]) is_fixed = self.is_fixed[idx] body_id = p.loadURDF(self.urdf_paths[idx], flags=flags, useFixedBase=is_fixed) # flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL) transformation = self.poses[idx] pos = transformation[0:3, 3] orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3])) - logging.info("Moving URDF to (pos,ori): " + np.array_str(pos) + ", " + np.array_str(orn)) + # logging.info("Moving URDF to (pos,ori): " + np.array_str(pos) + ", " + np.array_str(orn)) dynamics_info = p.getDynamicsInfo(body_id, -1) inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4] pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn) @@ -962,15 +944,6 @@ def _load(self, simulator): joint_position = self.joint_positions[idx][joint_name] set_joint_position(body_id, j, joint_position) - simulator.load_object_in_renderer( - self, - body_id, - self.class_id, - visual_mesh_to_material=self.visual_mesh_to_material[idx], - link_name_to_vm=self.link_name_to_vm[idx], - **self._rendering_params - ) - self.body_ids.append(body_id) self.load_supporting_surfaces() @@ -995,7 +968,7 @@ def reset(self): transformation = self.poses[idx] pos = transformation[0:3, 3] orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3])) - logging.info("Resetting URDF to (pos,ori): " + np.array_str(pos) + ", " + np.array_str(orn)) + # logging.info("Resetting URDF to (pos,ori): " + np.array_str(pos) + ", " + np.array_str(orn)) dynamics_info = p.getDynamicsInfo(body_id, -1) inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4] pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn) @@ -1011,19 +984,108 @@ def reset(self): body_id, j, p.VELOCITY_CONTROL, targetVelocity=0.0, force=self.joint_friction ) + def get_position(self): + """ + Get object position + + :return: position in xyz + """ + body_id = self.get_body_id() + pos, _ = p.getBasePositionAndOrientation(body_id) + return pos + + def get_orientation(self): + """ + Get object orientation + + :return: quaternion in xyzw + """ + body_id = self.get_body_id() + _, orn = p.getBasePositionAndOrientation(body_id) + return orn + + def get_position_orientation(self): + """ + Get object position and orientation + + :return: position in xyz + :return: quaternion in xyzw + """ + body_id = self.get_body_id() + pos, orn = p.getBasePositionAndOrientation(body_id) + return pos, orn + + def get_base_link_position_orientation(self): + """ + Get object base link position and orientation + + :return: position in xyz + :return: quaternion in xyzw + """ + # TODO: not used anywhere yet, but probably should be put in ObjectBase + body_id = self.get_body_id() + pos, orn = p.getBasePositionAndOrientation(body_id) + dynamics_info = p.getDynamicsInfo(body_id, -1) + inertial_pos = dynamics_info[3] + inertial_orn = dynamics_info[4] + inv_inertial_pos, inv_inertial_orn = p.invertTransform(inertial_pos, inertial_orn) + pos, orn = p.multiplyTransforms(pos, orn, inv_inertial_pos, inv_inertial_orn) + return pos, orn + + def set_position(self, pos): + """ + Set object position + + :param pos: position in xyz + """ + body_id = self.get_body_id() + if self.main_body_is_fixed: + logging.warning("cannot set_position for fixed objects") + return + + _, old_orn = p.getBasePositionAndOrientation(body_id) + p.resetBasePositionAndOrientation(body_id, pos, old_orn) + clear_cached_states(self) + + def set_orientation(self, orn): + """ + Set object orientation + + :param orn: quaternion in xyzw + """ + body_id = self.get_body_id() + if 
self.main_body_is_fixed: + logging.warning("cannot set_orientation for fixed objects") + return + + old_pos, _ = p.getBasePositionAndOrientation(body_id) + p.resetBasePositionAndOrientation(body_id, old_pos, orn) + clear_cached_states(self) + def set_position_orientation(self, pos, orn): + """ + Set object position and orientation + :param pos: position in xyz + :param orn: quaternion in xyzw + """ + body_id = self.get_body_id() if self.main_body_is_fixed: - logging.warning("cannot set position / orientation for fixed objects") + logging.warning("cannot set_position_orientation for fixed objects") return - super(URDFObject, self).set_position_orientation(pos, orn) + p.resetBasePositionAndOrientation(body_id, pos, orn) + clear_cached_states(self) def set_base_link_position_orientation(self, pos, orn): + body_id = self.get_body_id() if self.main_body_is_fixed: logging.warning("cannot set_base_link_position_orientation for fixed objects") return - - super(URDFObject, self).set_base_link_position_orientation(pos, orn) + dynamics_info = p.getDynamicsInfo(body_id, -1) + inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4] + pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn) + self.set_position_orientation(pos, orn) + clear_cached_states(self) def get_body_id(self): return self.body_ids[self.main_body] @@ -1050,147 +1112,3 @@ def add_meta_links(self, meta_links): def set_room_floor(self, room_floor): assert self.category == "floors" self.room_floor = room_floor - - def prepare_link_based_bounding_boxes(self): - """This function simply converts the link bounding box metadata to match prefixed link names.""" - self.unscaled_link_bounding_boxes = {} - if "link_bounding_boxes" in self.metadata: - for name, bb_data in self.metadata["link_bounding_boxes"].items(): - converted_name = self.get_prefixed_link_name(name) - self.unscaled_link_bounding_boxes[converted_name] = bb_data - - def get_base_aligned_bounding_box( - self, body_id=None, link_id=None, visual=False, xy_aligned=False, fallback_to_aabb=False - ): - """Get a bounding box for this object that's axis-aligned in the object's base frame.""" - if body_id is None: - body_id = self.get_body_id() - - bbox_type = "visual" if visual else "collision" - - # Get the base position transform. - pos, orn = p.getBasePositionAndOrientation(body_id) - base_com_to_world = utils.quat_pos_to_mat(pos, orn) - - # Compute the world-to-base frame transform. - world_to_base_com = trimesh.transformations.inverse_matrix(base_com_to_world) - - # Grab the corners of all the different links' bounding boxes. We will later fit a bounding box to - # this set of points to get our final, base-frame bounding box. - points = [] - - links = [link_id] if link_id is not None else get_all_links(body_id) - for link in links: - link_name = get_link_name(body_id, link) - # If the link has a bounding box annotation. - if link_name in self.unscaled_link_bounding_boxes: - # If a visual bounding box does not exist in the dictionary, try switching to collision. - # We expect that every link has its collision bb annotated (or set to None if none exists). - if bbox_type == "visual" and "visual" not in self.unscaled_link_bounding_boxes[link_name]: - logging.debug( - "Falling back to collision bbox for object %s link %s since no visual bbox exists.", - self.name, - link_name, - ) - bbox_type = "collision" - - # Check if the annotation is still missing. 
- if bbox_type not in self.unscaled_link_bounding_boxes[link_name]: - raise ValueError( - "Could not find %s bounding box for object %s link %s" % (bbox_type, self.name, link_name) - ) - - # Check if a mesh exists for this link. If None, the link is meshless, so we continue to the next link. - if self.unscaled_link_bounding_boxes[link_name][bbox_type] is None: - continue - - # Get the extent and transform. - bb_data = self.unscaled_link_bounding_boxes[link_name][bbox_type]["oriented"] - extent_in_bbox_frame = np.array(bb_data["extent"]) - bbox_to_link_origin = np.array(bb_data["transform"]) - - # Get the link's pose in the base frame. - if link == -1: - link_com_to_base_com = np.eye(4) - else: - link_state = get_link_state(body_id, link, velocity=False) - link_com_to_world = utils.quat_pos_to_mat( - link_state.linkWorldPosition, link_state.linkWorldOrientation - ) - link_com_to_base_com = np.dot(world_to_base_com, link_com_to_world) - - # Scale the bounding box in link origin frame. Here we create a transform that first puts the bounding - # box's vertices into the link frame, and then scales them to match the scale applied to this object. - # Note that once scaled, the vertices of the bounding box do not necessarily form a cuboid anymore but - # instead a parallelepiped. This is not a problem because we later fit a bounding box to the points, - # this time in the object's base link frame. - scale_in_link_frame = np.diag(np.concatenate([self.scales_in_link_frame[link_name], [1]])) - bbox_to_scaled_link_origin = np.dot(scale_in_link_frame, bbox_to_link_origin) - - # Account for the link vs. center-of-mass. - dynamics_info = p.getDynamicsInfo(body_id, link) - inertial_pos, inertial_orn = p.invertTransform(dynamics_info[3], dynamics_info[4]) - link_origin_to_link_com = utils.quat_pos_to_mat(inertial_pos, inertial_orn) - - # Compute the bounding box vertices in the base frame. - bbox_to_link_com = np.dot(link_origin_to_link_com, bbox_to_scaled_link_origin) - bbox_center_in_base_com = np.dot(link_com_to_base_com, bbox_to_link_com) - vertices_in_base_com = np.array(list(itertools.product((1, -1), repeat=3))) * (extent_in_bbox_frame / 2) - - # Add the points to our collection of points. - points.extend(trimesh.transformations.transform_points(vertices_in_base_com, bbox_center_in_base_com)) - elif fallback_to_aabb: - # If no BB annotation is available, get the AABB for this link. - aabb_center, aabb_extent = get_center_extent(body_id, link=link) - aabb_vertices_in_world = aabb_center + np.array(list(itertools.product((1, -1), repeat=3))) * ( - aabb_extent / 2 - ) - aabb_vertices_in_base_com = trimesh.transformations.transform_points( - aabb_vertices_in_world, world_to_base_com - ) - points.extend(aabb_vertices_in_base_com) - else: - raise ValueError( - "Bounding box annotation missing for link: %s. Use fallback_to_aabb=True if you're okay with using " - "AABB as fallback." % link_name - ) - - if xy_aligned: - # If the user requested an XY-plane aligned bbox, convert everything to that frame. - # The desired frame is same as the base_com frame with its X/Y rotations removed. - translate = trimesh.transformations.translation_from_matrix(base_com_to_world) - - # To find the rotation that this transform does around the Z axis, we rotate the [1, 0, 0] vector by it - # and then take the arctangent of its projection onto the XY plane. 
- rotated_X_axis = base_com_to_world[:3, 0] - rotation_around_Z_axis = np.arctan2(rotated_X_axis[1], rotated_X_axis[0]) - xy_aligned_base_com_to_world = trimesh.transformations.compose_matrix( - translate=translate, angles=[0, 0, rotation_around_Z_axis] - ) - - # We want to move our points to this frame as well. - world_to_xy_aligned_base_com = trimesh.transformations.inverse_matrix(xy_aligned_base_com_to_world) - base_com_to_xy_aligned_base_com = np.dot(world_to_xy_aligned_base_com, base_com_to_world) - points = trimesh.transformations.transform_points(points, base_com_to_xy_aligned_base_com) - - # Finally update our desired frame. - desired_frame_to_world = xy_aligned_base_com_to_world - else: - # Default desired frame is base CoM frame. - desired_frame_to_world = base_com_to_world - - # TODO: Implement logic to allow tight bounding boxes that don't necessarily have to match the base frame. - # All points are now in the desired frame: either the base CoM or the xy-plane-aligned base CoM. - # Now fit a bounding box to all the points by taking the minimum/maximum in the desired frame. - aabb_min_in_desired_frame = np.amin(points, axis=0) - aabb_max_in_desired_frame = np.amax(points, axis=0) - bbox_center_in_desired_frame = (aabb_min_in_desired_frame + aabb_max_in_desired_frame) / 2 - bbox_extent_in_desired_frame = aabb_max_in_desired_frame - aabb_min_in_desired_frame - - # Transform the center to the world frame. - bbox_center_in_world = trimesh.transformations.transform_points( - [bbox_center_in_desired_frame], desired_frame_to_world - )[0] - bbox_orn_in_world = Rotation.from_matrix(desired_frame_to_world[:3, :3]).as_quat() - - return bbox_center_in_world, bbox_orn_in_world, bbox_extent_in_desired_frame, bbox_center_in_desired_frame diff --git a/igibson/objects/cube.py b/igibson/objects/cube.py index dc53b054d..54045de15 100644 --- a/igibson/objects/cube.py +++ b/igibson/objects/cube.py @@ -1,24 +1,22 @@ import pybullet as p -from igibson.objects.object_base import SingleBodyObject from igibson.objects.stateful_object import StatefulObject -from igibson.utils.constants import SemanticClass -class Cube(StatefulObject, SingleBodyObject): +class Cube(StatefulObject): """ Cube shape primitive """ - def __init__(self, pos=[1, 2, 3], dim=[1, 2, 3], visual_only=False, mass=1000, color=[1, 1, 1, 1], **kwargs): - super(Cube, self).__init__(**kwargs) + def __init__(self, pos=[1, 2, 3], dim=[1, 2, 3], visual_only=False, mass=1000, color=[1, 1, 1, 1]): + super(Cube, self).__init__() self.basePos = pos self.dimension = dim self.visual_only = visual_only self.mass = mass self.color = color - def _load(self, simulator): + def _load(self): """ Load the object into pybullet """ @@ -34,6 +32,4 @@ def _load(self, simulator): p.resetBasePositionAndOrientation(body_id, self.basePos, baseOrientation) - simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params) - - return [body_id] + return body_id diff --git a/igibson/objects/multi_object_wrappers.py b/igibson/objects/multi_object_wrappers.py index 955e002a9..5943bf37b 100644 --- a/igibson/objects/multi_object_wrappers.py +++ b/igibson/objects/multi_object_wrappers.py @@ -2,12 +2,12 @@ import pybullet as p -from igibson.object_states.factory import get_state_name from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState -from igibson.objects.object_base import BaseObject, NonRobotObject +from igibson.objects.object_base import Object +from igibson.objects.stateful_object import StatefulObject -class 
ObjectGrouper(NonRobotObject): +class ObjectGrouper(StatefulObject): """A multi-object wrapper that groups multiple objects and applies operations to all of them in parallel.""" class ProceduralMaterialAggregator(object): @@ -56,7 +56,7 @@ def dump(self): ) def load(self, data): - if isinstance(data, ObjectGrouper.AbsoluteStateAggregator.GroupedStateDump): + if isinstance(data, (ObjectGrouper.AbsoluteStateAggregator.GroupedStateDump, list)): # We're loading a many-to-many data dump. assert len(data) == len(self.object_grouper.objects) @@ -83,9 +83,11 @@ def set_value(self, other, new_value): obj.states[self.state_type].set_value(other, new_value) def __init__(self, objects_with_pose_offsets): + super(StatefulObject, self).__init__() + objects = [obj for obj, _ in objects_with_pose_offsets] pose_offsets = [trans for _, trans in objects_with_pose_offsets] - assert objects and all(isinstance(obj, BaseObject) for obj in objects) + assert objects and all(isinstance(obj, Object) for obj in objects) self.objects = objects # Pose offsets are the transformation of the object parts to the whole @@ -138,15 +140,12 @@ def grouped_function(*args, **kwargs): return attrs[0] - def _load(self, simulator): + def _load(self): body_ids = [] for obj in self.objects: - body_ids += obj.load(simulator) + body_ids += obj._load() return body_ids - def get_body_id(self): - raise ValueError("Cannot get_body_id on ObjectGrouper") - def get_position(self): raise ValueError("Cannot get_position on ObjectGrouper") @@ -170,24 +169,17 @@ def set_base_link_position_orientation(self, pos, orn): new_pos, new_orn = p.multiplyTransforms(pos, orn, part_pos, part_orn) obj.set_base_link_position_orientation(new_pos, new_orn) - def dump_state(self): - return { - get_state_name(state_type): state_instance.dump() - for state_type, state_instance in self.states.items() - if issubclass(state_type, AbsoluteObjectState) - } - - def load_state(self, dump): - for state_type, state_instance in self.states.items(): - if issubclass(state_type, AbsoluteObjectState): - state_instance.load(dump[get_state_name(state_type)]) + def rotate_by(self, x=0, y=0, z=0): + raise ValueError("Cannot rotate_by on ObjectGrouper") -class ObjectMultiplexer(NonRobotObject): +class ObjectMultiplexer(StatefulObject): """A multi-object wrapper that acts as a proxy for the selected one between the set of objects it contains.""" def __init__(self, name, multiplexed_objects, current_index): - assert multiplexed_objects and all(isinstance(obj, BaseObject) for obj in multiplexed_objects) + super(StatefulObject, self).__init__() + + assert multiplexed_objects and all(isinstance(obj, Object) for obj in multiplexed_objects) assert 0 <= current_index < len(multiplexed_objects) for obj in multiplexed_objects: @@ -197,6 +189,9 @@ def __init__(self, name, multiplexed_objects, current_index): self.current_index = current_index self.name = name + # This will help route obj.states to one of the multiplexed_objects + del self.states + def set_selection(self, idx): assert 0 <= idx < len(self._multiplexed_objects) self.current_index = idx @@ -213,15 +208,12 @@ def __getattr__(self, item): return getattr(self.current_selection(), item) - def _load(self, simulator): + def _load(self): body_ids = [] for obj in self._multiplexed_objects: - body_ids += obj.load(simulator) + body_ids += obj._load() return body_ids - def get_body_id(self): - return self.current_selection().get_body_id() - def get_position(self): return self.current_selection().get_position() @@ -243,6 +235,9 @@ def 
set_position_orientation(self, pos, orn): def set_base_link_position_orientation(self, pos, orn): return self.current_selection().set_base_link_position_orientation(pos, orn) + def rotate_by(self, x=0, y=0, z=0): + return self.current_selection().rotate_by(x, y, z) + def dump_state(self): return { "current_index": self.current_index, diff --git a/igibson/objects/object_base.py b/igibson/objects/object_base.py index c7d80af97..597b9c5b2 100644 --- a/igibson/objects/object_base.py +++ b/igibson/objects/object_base.py @@ -1,131 +1,110 @@ -from abc import ABCMeta, abstractmethod - -import numpy as np import pybullet as p -from future.utils import with_metaclass - -from igibson.utils.constants import SemanticClass -class BaseObject(with_metaclass(ABCMeta, object)): - """This is the interface that all iGibson objects must implement.""" +class Object(object): + """ + Base Object class + """ - DEFAULT_RENDERING_PARAMS = { - "use_pbr": True, - "use_pbr_mapping": True, - "shadow_caster": True, - } + def __init__(self): + self.body_id = None + self.loaded = False - def __init__(self, class_id=SemanticClass.USER_ADDED_OBJS, rendering_params=None): - """ - Create an object instance with the minimum information of class ID and rendering parameters. + # initialize with empty states + self.states = dict() + # handle to instances in the renderer + self.renderer_instances = [] - @param class_id: What class ID the object should be assigned in semantic segmentation rendering mode. - @param rendering_params: Any keyword arguments to be passed into simulator.load_object_into_renderer(...). - """ - self.states = {} - self._loaded = False - self.class_id = class_id - self._rendering_params = dict(self.DEFAULT_RENDERING_PARAMS) - if rendering_params is not None: - self._rendering_params.update(rendering_params) + def highlight(self): + for instance in self.renderer_instances: + instance.set_highlight(True) - def load(self, simulator): - """Load object into pybullet and return list of loaded body ids.""" - if self._loaded: - raise ValueError("Cannot load a single object multiple times.") - self._loaded = True - return self._load(simulator) + def unhighlight(self): + for instance in self.renderer_instances: + instance.set_highlight(False) - @abstractmethod - def get_body_id(self): + def load(self): """ - Gets the body ID for the object. - - If the object somehow has multiple bodies, this will be the default body that the default manipulation functions - will manipulate. + Load the object into pybullet. 
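+        Repeated calls return the cached self.body_id without reloading.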
+ _load() will be implemented in the subclasses """ - pass - - @abstractmethod - def _load(self, simulator): - pass + if self.loaded: + return self.body_id + self.body_id = self._load() + self.loaded = True + return self.body_id def get_position(self): - """Get object position in the format of Array[x, y, z]""" - return self.get_position_orientation()[0] + """ + Get object position + + :return: position in xyz + """ + pos, _ = p.getBasePositionAndOrientation(self.body_id) + return pos def get_orientation(self): - """Get object orientation as a quaternion in the format of Array[x, y, z, w]""" - return self.get_position_orientation()[1] + """ + Get object orientation + + :return: quaternion in xyzw + """ + _, orn = p.getBasePositionAndOrientation(self.body_id) + return orn def get_position_orientation(self): - """Get object position and orientation in the format of Tuple[Array[x, y, z], Array[x, y, z, w]]""" - pos, orn = p.getBasePositionAndOrientation(self.get_body_id()) - return np.array(pos), np.array(orn) + """ + Get object position and orientation + + :return: position in xyz + :return: quaternion in xyzw + """ + pos, orn = p.getBasePositionAndOrientation(self.body_id) + return pos, orn def set_position(self, pos): - """Set object position in the format of Array[x, y, z]""" - old_orn = self.get_orientation() - self.set_position_orientation(pos, old_orn) + """ + Set object position + + :param pos: position in xyz + """ + _, old_orn = p.getBasePositionAndOrientation(self.body_id) + p.resetBasePositionAndOrientation(self.body_id, pos, old_orn) def set_orientation(self, orn): - """Set object orientation as a quaternion in the format of Array[x, y, z, w]""" - old_pos = self.get_position() - self.set_position_orientation(old_pos, orn) + """ + Set object orientation + + :param orn: quaternion in xyzw + """ + old_pos, _ = p.getBasePositionAndOrientation(self.body_id) + p.resetBasePositionAndOrientation(self.body_id, old_pos, orn) def set_position_orientation(self, pos, orn): - """Set object position and orientation in the format of Tuple[Array[x, y, z], Array[x, y, z, w]]""" - p.resetBasePositionAndOrientation(self.get_body_id(), pos, orn) + """ + Set object position and orientation + :param pos: position in xyz + :param orn: quaternion in xyzw + """ + p.resetBasePositionAndOrientation(self.body_id, pos, orn) def set_base_link_position_orientation(self, pos, orn): - """Set object base link position and orientation in the format of Tuple[Array[x, y, z], Array[x, y, z, w]]""" - dynamics_info = p.getDynamicsInfo(self.get_body_id(), -1) + dynamics_info = p.getDynamicsInfo(self.body_id, -1) inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4] pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn) self.set_position_orientation(pos, orn) + def rotate_by(self, x=0, y=0, z=0): + """ + Rotates an object by given euler angles + """ + e_x, e_y, e_z = p.getEulerFromQuaternion(self.get_orientation()) + self.set_orientation(p.getQuaternionFromEuler([e_x + x, e_y + y, e_z + z])) + def dump_state(self): - """Dump the state of the object other than what's not included in pybullet state.""" + """Dumps the state of the object other than what's not included in pybullet state.""" return None def load_state(self, dump): - """Load the state of the object other than what's not included in pybullet state.""" + """Loads the state of the object other than what's not included in pybullet state.""" pass - - -class NonRobotObject(BaseObject): - # This class implements the object interface for 
non-robot objects. - # Also allows us to identify non-robot objects until all simulator etc. call for importing etc. are unified. - - # TODO: This renderer_instances logic doesn't actually need to be specific to non-robot objects. Generalize this. - def __init__(self, **kwargs): - super(NonRobotObject, self).__init__(**kwargs) - - self.renderer_instances = [] - - def highlight(self): - for instance in self.renderer_instances: - instance.set_highlight(True) - - def unhighlight(self): - for instance in self.renderer_instances: - instance.set_highlight(False) - - -class SingleBodyObject(NonRobotObject): - """Provides convenience get_body_id() function for single-body objects.""" - - # TODO: Merge this into BaseObject once URDFObject also becomes single-body. - - def __init__(self, **kwargs): - super(SingleBodyObject, self).__init__(**kwargs) - self._body_id = None - - def load(self, simulator): - body_ids = super(NonRobotObject, self).load(simulator) - self._body_id = body_ids[0] - return body_ids - - def get_body_id(self): - return self._body_id diff --git a/igibson/objects/particles.py b/igibson/objects/particles.py index dc9ed087f..cf9f217be 100644 --- a/igibson/objects/particles.py +++ b/igibson/objects/particles.py @@ -7,24 +7,18 @@ import igibson from igibson.external.pybullet_tools import utils from igibson.external.pybullet_tools.utils import get_aabb_extent, get_link_name, link_from_name -from igibson.objects.object_base import SingleBodyObject +from igibson.objects.object_base import Object from igibson.utils import sampling_utils from igibson.utils.constants import PyBulletSleepState, SemanticClass _STASH_POSITION = [0, 0, -100] -class Particle(SingleBodyObject): +class Particle(Object): """ A particle object, used to simulate water stream and dust/stain """ - DEFAULT_RENDERING_PARAMS = { - "use_pbr": False, - "use_pbr_mapping": False, - "shadow_caster": True, - } - def __init__( self, size, @@ -35,7 +29,6 @@ def __init__( base_shape="sphere", mesh_filename=None, mesh_bounding_box=None, - **kwargs ): """ Create a particle. @@ -48,9 +41,8 @@ def __init__( :param base_shape: One of "cube", "sphere", "mesh". If mesh, mesh_filename also required. :param mesh_filename: Filename of obj file to load mesh from, if base_shape is "mesh". :param mesh_bounding_box: bounding box of the mesh when scale=1. Needed for scale computation. - :param rendering_params: rendering parameters to pass onto object base & renderer. 
""" - super(Particle, self).__init__(**kwargs) + super(Particle, self).__init__() self.base_pos = pos self.size = size self.visual_only = visual_only @@ -65,7 +57,7 @@ def __init__( self.mesh_filename = mesh_filename self.mesh_scale = np.array(size) / np.array(mesh_bounding_box) - def _load(self, simulator): + def _load(self): """ Load the object into pybullet """ @@ -97,28 +89,34 @@ def _load(self, simulator): p.resetBasePositionAndOrientation(body_id, np.array(self.base_pos), base_orientation) - simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params) - self.force_sleep(body_id) - return [body_id] + return body_id def force_sleep(self, body_id=None): if body_id is None: - body_id = self.get_body_id() + body_id = self.body_id activationState = p.ACTIVATION_STATE_SLEEP + p.ACTIVATION_STATE_DISABLE_WAKEUP p.changeDynamics(body_id, -1, activationState=activationState) def force_wakeup(self): activationState = p.ACTIVATION_STATE_WAKE_UP - p.changeDynamics(self.get_body_id(), -1, activationState=activationState) + p.changeDynamics(self.body_id, -1, activationState=activationState) class ParticleSystem(object): - DEFAULT_RENDERING_PARAMS = {} # Accept the Particle defaults but expose this interface for children - - def __init__(self, num, size, color=(1, 1, 1, 1), rendering_params=None, **kwargs): + def __init__( + self, + num, + size, + color=(1, 1, 1, 1), + class_id=SemanticClass.USER_ADDED_OBJS, + use_pbr=False, + use_pbr_mapping=False, + shadow_caster=True, + **kwargs + ): size = np.array(size) if size.ndim == 2: assert size.shape[0] == num @@ -133,19 +131,19 @@ def __init__(self, num, size, color=(1, 1, 1, 1), rendering_params=None, **kwarg self._particles_activated_at_any_time = set() self._simulator = None - - rendering_params_for_particle = dict(self.DEFAULT_RENDERING_PARAMS) - if rendering_params is not None: - rendering_params_for_particle.update(rendering_params) + self._import_params = { + "class_id": class_id, + "use_pbr": use_pbr, + "use_pbr_mapping": use_pbr_mapping, + "shadow_caster": shadow_caster, + } for i in range(num): # If different sizes / colors provided for each instance, pick the correct one for this instance. this_size = size if size.ndim == 1 else size[i] this_color = color if color.ndim == 1 else color[i] - particle = Particle( - this_size, _STASH_POSITION, color=this_color, rendering_params=rendering_params_for_particle, **kwargs - ) + particle = Particle(this_size, _STASH_POSITION, color=this_color, **kwargs) self._all_particles.append(particle) self._stashed_particles.append(particle) @@ -209,10 +207,10 @@ def stash_particle(self, particle): particle.force_sleep() def _load_particle(self, particle): - body_ids = particle.load(self._simulator) + body_id = self._simulator.import_object(particle, **self._import_params) # Put loaded particles at the stash position initially. particle.set_position(_STASH_POSITION) - return body_ids + return body_id def unstash_particle(self, position, orientation, particle=None): # If the user wants a particular particle, give it to them. Otherwise, unstash one. 
@@ -222,7 +220,7 @@ def unstash_particle(self, position, orientation, particle=None): particle = self._stashed_particles.popleft() # Lazy loading of the particle now if not already loaded - if not particle.get_body_id(): + if not particle.loaded: self._load_particle(particle) particle.set_position_orientation(position, orientation) @@ -381,7 +379,6 @@ class WaterStream(ParticleSystem): ] ) _COLOR_OPTIONS = np.array([(0.61, 0.82, 0.86, 1), (0.5, 0.77, 0.87, 1)]) - DEFAULT_RENDERING_PARAMS = {"use_pbr": True} # PBR needs to be on for the shiny water particles. def __init__(self, water_source_pos, num, initial_dump=None, **kwargs): if initial_dump is not None: @@ -400,6 +397,7 @@ def __init__(self, water_source_pos, num, initial_dump=None, **kwargs): color=self.colors, visual_only=False, mass=0.00005, # each drop is around 0.05 grams + use_pbr=True, # PBR needs to be on for the shiny water particles. **kwargs ) @@ -433,16 +431,16 @@ def initialize(self, simulator): def _load_particle(self, particle): # First load the particle normally. - body_ids = super(WaterStream, self)._load_particle(particle) + body_id = super(WaterStream, self)._load_particle(particle) # Set renderer instance settings on the particles. instances = self._simulator.renderer.get_instances() for instance in instances: - if instance.pybullet_uuid in body_ids: + if instance.pybullet_uuid == body_id: instance.roughness = 0 instance.metalness = 1 - return body_ids + return body_id def set_running(self, on): self.on = on diff --git a/igibson/objects/pedestrian.py b/igibson/objects/pedestrian.py index 2d4ff60de..21846c816 100644 --- a/igibson/objects/pedestrian.py +++ b/igibson/objects/pedestrian.py @@ -3,18 +3,16 @@ import pybullet as p import igibson -from igibson.objects.object_base import SingleBodyObject from igibson.objects.stateful_object import StatefulObject -from igibson.utils.constants import SemanticClass -class Pedestrian(StatefulObject, SingleBodyObject): +class Pedestrian(StatefulObject): """ Pedestiran object """ - def __init__(self, style="standing", pos=[0, 0, 0], **kwargs): - super(Pedestrian, self).__init__(**kwargs) + def __init__(self, style="standing", pos=[0, 0, 0]): + super(Pedestrian, self).__init__() self.collision_filename = os.path.join( igibson.assets_path, "models", "person_meshes", "person_{}".format(style), "meshes", "person_vhacd.obj" ) @@ -24,7 +22,7 @@ def __init__(self, style="standing", pos=[0, 0, 0], **kwargs): self.cid = None self.pos = pos - def _load(self, simulator): + def _load(self): """ Load the object into pybullet """ @@ -46,9 +44,7 @@ def _load(self, simulator): parentFrameOrientation=[-0.5, -0.5, -0.5, 0.5], ) # facing x axis - simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params) - - return [body_id] + return body_id def reset_position_orientation(self, pos, orn): """ diff --git a/igibson/objects/shapenet_object.py b/igibson/objects/shapenet_object.py index 98c9afdee..a20dd3437 100644 --- a/igibson/objects/shapenet_object.py +++ b/igibson/objects/shapenet_object.py @@ -1,19 +1,17 @@ import numpy as np import pybullet as p -from igibson.objects.object_base import SingleBodyObject from igibson.objects.stateful_object import StatefulObject -from igibson.utils.constants import SemanticClass -class ShapeNetObject(StatefulObject, SingleBodyObject): +class ShapeNetObject(StatefulObject): """ ShapeNet object Reference: https://www.shapenet.org/ """ - def __init__(self, path, scale=1.0, position=[0, 0, 0], orientation=[0, 0, 0], **kwargs): - 
super(ShapeNetObject, self).__init__(**kwargs) + def __init__(self, path, scale=1.0, position=[0, 0, 0], orientation=[0, 0, 0]): + super(ShapeNetObject, self).__init__() self.filename = path self.scale = scale self.position = position @@ -35,7 +33,7 @@ def __init__(self, path, scale=1.0, position=[0, 0, 0], orientation=[0, 0, 0], * "orientation_quat": pose[1], } - def _load(self, simulator): + def _load(self): """ Load the object into pybullet """ @@ -47,6 +45,4 @@ def _load(self, simulator): baseCollisionShapeIndex=collision_id, baseVisualShapeIndex=-1, ) - simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params) - - return [body_id] + return body_id diff --git a/igibson/objects/soft_object.py b/igibson/objects/soft_object.py index e120f9793..c7f4da1a8 100644 --- a/igibson/objects/soft_object.py +++ b/igibson/objects/soft_object.py @@ -1,11 +1,9 @@ import pybullet as p -from igibson.objects.object_base import SingleBodyObject from igibson.objects.stateful_object import StatefulObject -from igibson.utils.constants import SemanticClass -class SoftObject(StatefulObject, SingleBodyObject): +class SoftObject(StatefulObject): """ Soft object (WIP) """ @@ -30,9 +28,8 @@ def __init__( frictionCoeff=0, useFaceContact=0, useSelfCollision=0, - **kwargs ): - super(SoftObject, self).__init__(**kwargs) + super(SoftObject, self).__init__() self.filename = filename self.scale = scale self.basePosition = basePosition @@ -52,7 +49,7 @@ def __init__( self.useFaceContact = useFaceContact self.useSelfCollision = useSelfCollision - def _load(self, simulator): + def _load(self): """ Load the object into pybullet """ @@ -80,14 +77,10 @@ def _load(self, simulator): # Set signed distance function voxel size (integrate to Simulator class?) p.setPhysicsEngineParameter(sparseSdfVoxelSize=0.1) - simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params, softbody=True) - - return [body_id] + return body_id def add_anchor(self, nodeIndex=-1, bodyUniqueId=-1, linkIndex=-1, bodyFramePosition=[0, 0, 0], physicsClientId=0): """ Create soft body anchor """ - p.createSoftBodyAnchor( - self.get_body_id(), nodeIndex, bodyUniqueId, linkIndex, bodyFramePosition, physicsClientId - ) + p.createSoftBodyAnchor(self.body_id, nodeIndex, bodyUniqueId, linkIndex, bodyFramePosition, physicsClientId) diff --git a/igibson/objects/stateful_object.py b/igibson/objects/stateful_object.py index 2ec182ad5..1a9ad1641 100644 --- a/igibson/objects/stateful_object.py +++ b/igibson/objects/stateful_object.py @@ -1,22 +1,18 @@ from igibson.object_states.factory import get_state_name, prepare_object_states from igibson.object_states.object_state_base import AbsoluteObjectState from igibson.object_states.utils import clear_cached_states -from igibson.objects.object_base import BaseObject +from igibson.objects.object_base import Object -class StatefulObject(BaseObject): - """Objects that support object states.""" +class StatefulObject(Object): + """ + Stateful Base Object class + """ - def __init__(self, abilities=None, **kwargs): - super(StatefulObject, self).__init__(**kwargs) - prepare_object_states(self, abilities=abilities) - - def load(self, simulator): - body_ids = super(StatefulObject, self).load(simulator) - for state in self.states.values(): - state.initialize(simulator) - - return body_ids + def __init__(self): + super(StatefulObject, self).__init__() + self.abilities = {} + prepare_object_states(self, online=True) def dump_state(self): return { @@ -30,6 +26,14 @@ def 
load_state(self, dump): if issubclass(state_type, AbsoluteObjectState): state_instance.load(dump[get_state_name(state_type)]) + def set_position(self, pos): + super(StatefulObject, self).set_position(pos) + clear_cached_states(self) + + def set_orientation(self, orn): + super(StatefulObject, self).set_orientation(orn) + clear_cached_states(self) + def set_position_orientation(self, pos, orn): super(StatefulObject, self).set_position_orientation(pos, orn) clear_cached_states(self) diff --git a/igibson/objects/visual_marker.py b/igibson/objects/visual_marker.py index 21a9bc9d4..fac3e72e9 100644 --- a/igibson/objects/visual_marker.py +++ b/igibson/objects/visual_marker.py @@ -1,19 +1,13 @@ import pybullet as p -from igibson.objects.object_base import SingleBodyObject +from igibson.objects.object_base import Object -class VisualMarker(SingleBodyObject): +class VisualMarker(Object): """ Visual shape created with shape primitives """ - DEFAULT_RENDERING_PARAMS = { - "use_pbr": False, - "use_pbr_mapping": False, - "shadow_caster": False, - } - def __init__( self, visual_shape=p.GEOM_SPHERE, @@ -24,7 +18,6 @@ def __init__( initial_offset=[0, 0, 0], filename=None, scale=[1.0] * 3, - **kwargs ): """ create a visual shape to show in pybullet and MeshRenderer @@ -38,7 +31,7 @@ def __init__( :param filename: mesh file name for p.GEOM_MESH :param scale: scale for p.GEOM_MESH """ - super(VisualMarker, self).__init__(**kwargs) + super(VisualMarker, self).__init__() self.visual_shape = visual_shape self.rgba_color = rgba_color self.radius = radius @@ -48,7 +41,7 @@ def __init__( self.filename = filename self.scale = scale - def _load(self, simulator): + def _load(self): """ Load the object into pybullet """ @@ -80,9 +73,7 @@ def _load(self, simulator): baseVisualShapeIndex=shape, baseCollisionShapeIndex=-1, flags=p.URDF_ENABLE_SLEEPING ) - simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params) - - return [body_id] + return body_id def set_color(self, color): """ @@ -90,18 +81,18 @@ def set_color(self, color): :param color: normalized rgba color """ - p.changeVisualShape(self.get_body_id(), -1, rgbaColor=color) + p.changeVisualShape(self.body_id, -1, rgbaColor=color) def force_sleep(self, body_id=None): if body_id is None: - body_id = self.get_body_id() + body_id = self.body_id activationState = p.ACTIVATION_STATE_SLEEP + p.ACTIVATION_STATE_DISABLE_WAKEUP p.changeDynamics(body_id, -1, activationState=activationState) def force_wakeup(self): activationState = p.ACTIVATION_STATE_WAKE_UP - p.changeDynamics(self.get_body_id(), -1, activationState=activationState) + p.changeDynamics(self.body_id, -1, activationState=activationState) def set_position(self, pos): self.force_wakeup() diff --git a/igibson/objects/ycb_object.py b/igibson/objects/ycb_object.py index d331c5a97..14a184abe 100644 --- a/igibson/objects/ycb_object.py +++ b/igibson/objects/ycb_object.py @@ -3,24 +3,22 @@ import pybullet as p import igibson -from igibson.objects.object_base import SingleBodyObject from igibson.objects.stateful_object import StatefulObject -from igibson.utils.constants import SemanticClass -class YCBObject(StatefulObject, SingleBodyObject): +class YCBObject(StatefulObject): """ YCB Object from assets/models/ycb Reference: https://www.ycbbenchmarks.com/ """ - def __init__(self, name, scale=1, **kwargs): - super(YCBObject, self).__init__(**kwargs) + def __init__(self, name, scale=1): + super(YCBObject, self).__init__() self.visual_filename = os.path.join(igibson.assets_path, "models", "ycb", 
name, "textured_simple.obj") self.collision_filename = os.path.join(igibson.assets_path, "models", "ycb", name, "textured_simple_vhacd.obj") self.scale = scale - def _load(self, simulator): + def _load(self): collision_id = p.createCollisionShape(p.GEOM_MESH, fileName=self.collision_filename, meshScale=self.scale) visual_id = p.createVisualShape(p.GEOM_MESH, fileName=self.visual_filename, meshScale=self.scale) @@ -30,14 +28,8 @@ def _load(self, simulator): basePosition=[0.2, 0.2, 1.5], baseMass=0.1, ) + self.body_id = body_id + return body_id - simulator.load_object_in_renderer(self, body_id, self.class_id, **self._rendering_params) - - return [body_id] - - def reset(self): - return - - def force_wakeup(self): - activationState = p.ACTIVATION_STATE_WAKE_UP - p.changeDynamics(self.get_body_id(), -1, activationState=activationState) + def get_body_id(self): + return self.body_id diff --git a/igibson/physics/settings.py b/igibson/physics/settings.py deleted file mode 100644 index 53ed518bc..000000000 --- a/igibson/physics/settings.py +++ /dev/null @@ -1,8 +0,0 @@ -class PhysicsSettings(object): - def __init__( - self, - solver_iterations=100, - enhanced_determinism=False, - ): - self.solver_iterations = solver_iterations - enhanced_determinism = enhanced_determinism diff --git a/igibson/render/CMakeLists.txt b/igibson/render/CMakeLists.txt index 20d0d0db3..65f5ba714 100644 --- a/igibson/render/CMakeLists.txt +++ b/igibson/render/CMakeLists.txt @@ -2,221 +2,147 @@ cmake_minimum_required(VERSION 2.8.12) project(CppMeshRenderer) set(USE_GLAD TRUE) -set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE) -set(OpenGL_GL_PREFERENCE GLVND) - -if(MAC_PLATFORM) - set(USE_GLFW TRUE) - set(USE_VR FALSE) - set(USE_EGL FALSE) -elseif(WIN32) - set(USE_GLFW FALSE) - # iGibson on Windows is always in VR mode - set(USE_VR TRUE) - set(USE_EGL FALSE) -elseif(UNIX) - set(USE_GLFW FALSE) - set(USE_EGL TRUE) -else() - message( FATAL_ERROR "Unsupported platform: iGibson only supports macOS, linux, and windows" ) -endif() -# If a user is using a new enough cmake version (Ubuntu 18.04 or greater) -# Automatically detect and enable/disable CUDA, otherwise assume true -if (UNIX AND (CMAKE_VERSION VERSION_GREATER 3.17 OR CMAKE_VERSION VERSION_EQUAL 3.17)) - find_package(CUDAToolkit) - if (NOT CUDAToolkit_FOUND) - message(WARNING [=[ - nvcc not detected in path. iGibson will be built without CUDA support used - for rendering to tensors. If desired, please install cudatoolkit with your - package manager or cudatoolkit-dev with conda. 
- ]=]) +if (MAC_PLATFORM) + set(USE_CUDA FALSE) + set(USE_GLFW TRUE) + set(USE_VR FALSE) +elseif (WIN32) set(USE_CUDA FALSE) - else () + set(USE_GLFW TRUE) + # iGibson on Windows is always in VR mode + set(USE_VR TRUE) +else () set(USE_CUDA TRUE) - endif () -elseif(UNIX) - # Assume CUDA true otherwise - set(USE_CUDA TRUE) -else() - # Assume CUDA false otherwise (macOS, Windows) - set(USE_CUDA FALSE) -endif() + set(USE_GLFW FALSE) + set(USE_VR FALSE) +endif () include_directories(glad) -if(NOT USE_GLAD) - find_package(OpenGL) -else() - add_definitions(-DUSE_GLAD) -endif() +if (NOT USE_GLAD) + find_package(OpenGL) +else () + add_definitions(-DUSE_GLAD) +endif () -if(USE_VR) - find_package(OpenGL) - # OpenGL is needed for vr-gl interoperation +if (USE_VR) + find_package(OpenGL) + # OpenGL is needed for vr-gl interoperation endif() -if(USE_CUDA) - add_definitions(-DUSE_CUDA) - find_package(OpenGL) - # OpenGL is still needed for cuda-gl interoperation -endif() +if (USE_CUDA) + add_definitions(-DUSE_CUDA) + find_package(OpenGL) + #OpenGL is still needed for cuda-gl interoperation +endif () add_subdirectory(pybind11) add_subdirectory(cryptopp) include_directories(cryptopp) + # Add GLM include directory include_directories("glm") -if(USE_GLFW OR USE_VR) - set(GLFW_DIR glfw) - set(GLFW_BUILD_EXAMPLES - OFF - CACHE INTERNAL "Build the GLFW example programs") - set(GLFW_BUILD_TESTS - OFF - CACHE INTERNAL "Build the GLFW test programs") - set(GLFW_BUILD_DOCS - OFF - CACHE INTERNAL "Build the GLFW documentation") - set(GLFW_INSTALL - OFF - CACHE INTERNAL "Generate installation target") - add_subdirectory("${GLFW_DIR}") - include_directories("${GLFW_DIR}/include") -endif() - -if(USE_VR) - # Find OpenVR - set(OPENVR_DIR openvr) - set(OPENVR_INCLUDE_DIR "${OPENVR_DIR}/headers") - include_directories("${OPENVR_INCLUDE_DIR}") +if (USE_GLFW) + set(GLFW_DIR glfw) + set(GLFW_BUILD_EXAMPLES OFF CACHE INTERNAL "Build the GLFW example programs") + set(GLFW_BUILD_TESTS OFF CACHE INTERNAL "Build the GLFW test programs") + set(GLFW_BUILD_DOCS OFF CACHE INTERNAL "Build the GLFW documentation") + set(GLFW_INSTALL OFF CACHE INTERNAL "Generate installation target") + add_subdirectory("${GLFW_DIR}") + include_directories("${GLFW_DIR}/include") +endif () + +if (USE_VR) + # Find OpenVR + set(WINDOWS_PATH_SUFFIXES win64 Win64 x64) + set(OPENVR_DIR openvr) + find_library(OPENVR_LIBRARIES + NAMES + openvr_api + PATHS + "${OPENVR_DIR}/bin" + "${OPENVR_DIR}/lib" + PATH_SUFFIXES + ${WINDOWS_PATH_SUFFIXES} + NO_DEFAULT_PATH + NO_CMAKE_FIND_ROOT_PATH + ) + set(OPENVR_INCLUDE_DIR "${OPENVR_DIR}/headers") + include_directories("${OPENVR_INCLUDE_DIR}") - if(WIN32) - set(PATH_SUFFIXES win64 Win64 x64) # Find SRAnipal set(SRANI_DIR sranipal) - find_library( - SRANI_LIBRARIES - NAMES SRAnipal - PATHS "${SRANI_DIR}/lib" - NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + find_library(SRANI_LIBRARIES + NAMES + SRAnipal + PATHS + "${SRANI_DIR}/lib" + NO_DEFAULT_PATH + NO_CMAKE_FIND_ROOT_PATH + ) include_directories("${SRANI_DIR}/include") - else() - set(PATH_SUFFIXES linux64 Linux64 x64) - endif() - - find_library( - OPENVR_LIBRARIES - NAMES openvr_api - PATHS "${OPENVR_DIR}/bin" "${OPENVR_DIR}/lib" - PATH_SUFFIXES ${PATH_SUFFIXES} - NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) - - add_library( - VRRendererContext MODULE glad/gl.cpp cpp/vr_mesh_renderer.cpp - cpp/glfw_mesh_renderer.cpp cpp/mesh_renderer.cpp) - if(WIN32) - target_link_libraries( - VRRendererContext - PRIVATE pybind11::module - ${CMAKE_DL_LIBS} - glfw - ${GLFW_LIBRARIES} - 
${OPENGL_LIBRARIES} - ${OPENVR_LIBRARIES} - ${SRANI_LIBRARIES} - cryptopp-static) - else() - target_link_libraries( - VRRendererContext - PRIVATE pybind11::module - ${CMAKE_DL_LIBS} - glfw - ${GLFW_LIBRARIES} - ${OPENGL_LIBRARIES} - ${OPENVR_LIBRARIES} - ${CUDA_LIBRARIES} - cryptopp-static) - endif() - set_target_properties( - VRRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" - SUFFIX "${PYTHON_MODULE_EXTENSION}") endif() -if(USE_EGL) - if(USE_CUDA) - find_package(CUDA REQUIRED) - set(CUDA_LIBRARIES PUBLIC ${CUDA_LIBRARIES}) - cuda_add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp - cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp) - else() - add_library( - EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp - cpp/egl_mesh_renderer.cpp) - endif() - - if(USE_GLAD) - target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl - pthread cryptopp-static) - else() - target_link_libraries( - EGLRendererContext PRIVATE pybind11::module dl pthread EGL - ${OPENGL_LIBRARIES} cryptopp-static) - endif() - - set_target_properties( - EGLRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" - SUFFIX "${PYTHON_MODULE_EXTENSION}") +add_library(tinyobjloader MODULE cpp/tinyobjloader/tiny_obj_loader.cc cpp/tinyobjloader/tiny_obj_loader_decrypt.cc cpp/tinyobjloader/bindings.cc) +if (USE_VR) + add_library(VRRendererContext MODULE glad/gl.cpp cpp/vr_mesh_renderer.cpp cpp/glfw_mesh_renderer.cpp cpp/mesh_renderer.cpp) +else() + if (USE_CUDA) + find_package(CUDA REQUIRED) + set(CUDA_LIBRARIES PUBLIC ${CUDA_LIBRARIES}) + cuda_add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp) + else () + add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp) + endif () + if (USE_GLFW) + add_library(GLFWRendererContext MODULE glad/gl.cpp cpp/glfw_mesh_renderer.cpp cpp/mesh_renderer.cpp) + endif () endif() -if(USE_GLFW) - add_library(GLFWRendererContext MODULE glad/gl.cpp cpp/glfw_mesh_renderer.cpp - cpp/mesh_renderer.cpp) - if(WIN32) - if(USE_GLAD) - target_link_libraries( - GLFWRendererContext PRIVATE pybind11::module glfw ${GLFW_LIBRARIES} - cryptopp-static) - else() - target_link_libraries( - GLFWRendererContext PRIVATE pybind11::module glfw ${GLFW_LIBRARIES} - ${OPENGL_LIBRARIES} cryptopp-static) +if (USE_VR) + target_link_libraries(VRRendererContext PRIVATE pybind11::module ${CMAKE_DL_LIBS} glfw ${GLFW_LIBRARIES} ${OPENGL_LIBRARIES} ${OPENVR_LIBRARIES} ${SRANI_LIBRARIES} cryptopp-static) +else () + if (USE_GLAD) + target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl pthread cryptopp-static) + if (USE_GLFW) + target_link_libraries(GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES} cryptopp-static) + endif () + else () + target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl pthread EGL ${OPENGL_LIBRARIES} cryptopp-static) + if (USE_GLFW) + target_link_libraries(GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES} ${OPENGL_LIBRARIES} cryptopp-static) + endif () endif() - else() - if(USE_GLAD) - target_link_libraries( - GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES} - cryptopp-static) - else() - target_link_libraries( - GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES} - ${OPENGL_LIBRARIES} cryptopp-static) - endif() - endif() - set_target_properties( - GLFWRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" - SUFFIX 
"${PYTHON_MODULE_EXTENSION}") -endif() +endif () -add_library( - tinyobjloader MODULE - cpp/tinyobjloader/tiny_obj_loader.cc - cpp/tinyobjloader/tiny_obj_loader_decrypt.cc cpp/tinyobjloader/bindings.cc) target_link_libraries(tinyobjloader PRIVATE pybind11::module cryptopp-static) -set_target_properties( - tinyobjloader PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" - SUFFIX "${PYTHON_MODULE_EXTENSION}") - -if(UNIX) - add_executable(query_devices glad/egl.cpp glad/gl.cpp cpp/query_devices.cpp) - add_executable(test_device glad/egl.cpp glad/gl.cpp cpp/test_device.cpp) - if(USE_GLAD) - target_link_libraries(query_devices dl pthread) - target_link_libraries(test_device dl pthread) - else() - target_link_libraries(query_devices dl pthread EGL ${OPENGL_LIBRARIES}) - target_link_libraries(test_device dl pthread EGL ${OPENGL_LIBRARIES}) - endif() + +if (USE_VR) + set_target_properties(VRRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" + SUFFIX "${PYTHON_MODULE_EXTENSION}") +else() + set_target_properties(EGLRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" + SUFFIX "${PYTHON_MODULE_EXTENSION}") + if (USE_GLFW) + set_target_properties(GLFWRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" + SUFFIX "${PYTHON_MODULE_EXTENSION}") + endif () +endif() + +set_target_properties(tinyobjloader PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" + SUFFIX "${PYTHON_MODULE_EXTENSION}") + +if (NOT USE_VR) + add_executable(query_devices glad/egl.cpp glad/gl.cpp cpp/query_devices.cpp) + add_executable(test_device glad/egl.cpp glad/gl.cpp cpp/test_device.cpp) + if (USE_GLAD) + target_link_libraries(query_devices dl pthread) + target_link_libraries(test_device dl pthread) + else () + target_link_libraries(query_devices dl pthread EGL ${OPENGL_LIBRARIES}) + target_link_libraries(test_device dl pthread EGL ${OPENGL_LIBRARIES}) + endif () endif() diff --git a/igibson/render/cpp/glfw_mesh_renderer.cpp b/igibson/render/cpp/glfw_mesh_renderer.cpp index 2e2392a92..8b78a7597 100644 --- a/igibson/render/cpp/glfw_mesh_renderer.cpp +++ b/igibson/render/cpp/glfw_mesh_renderer.cpp @@ -69,32 +69,6 @@ int GLFWRendererContext::init() { exit(EXIT_FAILURE); } - // Our codebase assumes that window size is equal to the framebuffer size. - // The change below was to maintain this assumption for high DPI displays. - int framebufferWidth; - int framebufferHeight; - glfwGetFramebufferSize(this->window, &framebufferWidth, &framebufferHeight); - - int scaling_factor = framebufferWidth / m_windowWidth; - - // This accounts for retina display. This display uses double-sized pixels. 
We need to create a window that is - // half the size to accomodate for that - if(framebufferWidth!=m_windowWidth) - { - glfwDestroyWindow(this->window); - if (m_fullscreen) { - this->window = glfwCreateWindow(m_windowWidth/scaling_factor, m_windowHeight/scaling_factor, "Gibson Renderer Output", glfwGetPrimaryMonitor(), NULL); - } - else { - this->window = glfwCreateWindow(m_windowWidth/scaling_factor, m_windowHeight/scaling_factor, "Gibson Renderer Output", NULL, NULL); - } - - if (this->window == NULL) { - fprintf(stderr, "ERROR: Failed to create GLFW window.\n"); - exit(EXIT_FAILURE); - } - } - glfwMakeContextCurrent(this->window); glfwSwapInterval(0); diff --git a/igibson/render/cpp/glfw_mesh_renderer.h b/igibson/render/cpp/glfw_mesh_renderer.h index cff58aabe..3fbf4b0dd 100644 --- a/igibson/render/cpp/glfw_mesh_renderer.h +++ b/igibson/render/cpp/glfw_mesh_renderer.h @@ -2,7 +2,9 @@ #define GLFW_MESH_RENDERER_HEADER #include "mesh_renderer.h" +#ifdef WIN32 #include +#endif class GLFWRendererContext: public MeshRendererContext { diff --git a/igibson/render/cpp/mesh_renderer.cpp b/igibson/render/cpp/mesh_renderer.cpp index af7634867..8c946cb48 100644 --- a/igibson/render/cpp/mesh_renderer.cpp +++ b/igibson/render/cpp/mesh_renderer.cpp @@ -77,7 +77,7 @@ using CryptoPP::byte; class Image { public: static std::shared_ptr fromFile(const std::string &filename, int channels) { - std::printf("Loading image: %s\n", filename.c_str()); + // std::printf("Loading image: %s\n", filename.c_str()); stbi_set_flip_vertically_on_load(false); std::shared_ptr image{new Image}; @@ -325,7 +325,7 @@ py::list MeshRendererContext::setup_framebuffer_meshrenderer(int width, int heig glBindTexture(GL_TEXTURE_2D, color_tex_normal); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); glBindTexture(GL_TEXTURE_2D, color_tex_semantics); - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); glBindTexture(GL_TEXTURE_2D, color_tex_ins_seg); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); glBindTexture(GL_TEXTURE_2D, color_tex_3d); @@ -388,7 +388,7 @@ py::list MeshRendererContext::setup_framebuffer_meshrenderer_ms(int width, int h glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_tex_normal); glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA, width, height, GL_TRUE); glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_tex_semantics); - glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA32F, width, height, GL_TRUE); + glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA, width, height, GL_TRUE); glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_tex_ins_seg); glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA32F, width, height, GL_TRUE); glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_tex_3d); @@ -1007,7 +1007,7 @@ void MeshRendererContext::setup_pbr(std::string shader_path, glFinish(); - std::cout << "INFO: compiled pbr shaders" << std::endl; + // std::cout << "INFO: compiled pbr shaders" << std::endl; } GLuint MeshRendererContext::linkProgram(std::initializer_list shaders) { @@ -1056,7 +1056,7 @@ GLuint MeshRendererContext::compileShader(const std::string &filename, GLenum ty } const GLchar *srcBufferPtr = src.c_str(); - std::printf("Compiling GLSL shader: %s\n", filename.c_str()); + // std::printf("Compiling GLSL shader: %s\n", filename.c_str()); GLuint shader = 
glCreateShader(type); glShaderSource(shader, 1, &srcBufferPtr, nullptr); @@ -1149,10 +1149,10 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil std::vector texWidths; std::vector texChannels; std::vector buffer; - printf("number of textures %d\n", num_textures); + // printf("number of textures %d\n", num_textures); for (int i = 0; i < num_textures; i++) { std::string filename = filenames[i]; - std::cout << "Filename is: " << filename << std::endl; + // std::cout << "Filename is: " << filename << std::endl; int w; int h; int comp; @@ -1166,7 +1166,7 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil // force to 3 channels if (image == nullptr) throw(std::string("Failed to load texture")); - std::cout << "Size is w: " << w << " by h: " << h << std::endl; + // std::cout << "Size is w: " << w << " by h: " << h << std::endl; comp = 3; image_data.push_back(image); texHeights.push_back(h); @@ -1213,7 +1213,7 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil std::string tex_filename = filenames[i]; bool contains_keyword = tex_filename.find("floor") != std::string::npos || tex_filename.find("wall") != std::string::npos || tex_filename.find("ceiling") != std::string::npos; if (score >= texCutoff || contains_keyword) { - std::cout << "Appending texture with name: " << tex_filename << " to large bucket" << std::endl; + // std::cout << "Appending texture with name: " << tex_filename << " to large bucket" << std::endl; texIndices[0].push_back(i); tex_info_i.append(0); tex_info_i.append(firstTexLayerNum); @@ -1222,7 +1222,7 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil firstTexLayerNum++; } else { - std::cout << "Appending texture with name: " << tex_filename << " to small bucket" << std::endl; + // std::cout << "Appending texture with name: " << tex_filename << " to small bucket" << std::endl; texIndices[1].push_back(i); tex_info_i.append(1); tex_info_i.append(secondTexLayerNum); @@ -1234,13 +1234,13 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil texLayerData.append(tex_info_i); } - printf("Texture 1 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", texLayerDims[0], texLayerDims[1], firstTexLayerNum, texId1); - if (shouldShrinkSmallTextures) { - printf("Texture 2 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", smallTexBucketSize, smallTexBucketSize, secondTexLayerNum, texId2); - } - else { - printf("Texture 2 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", texLayerDims[2], texLayerDims[3], secondTexLayerNum, texId2); - } + // printf("Texture 1 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", texLayerDims[0], texLayerDims[1], firstTexLayerNum, texId1); + // if (shouldShrinkSmallTextures) { + // printf("Texture 2 is w:%d by h:%d by depth:%d 3D array texture. ID %d\n", smallTexBucketSize, smallTexBucketSize, secondTexLayerNum, texId2); + // } + // else { + // printf("Texture 2 is w:%d by h:%d by depth:%d 3D array texture. 
ID %d\n", texLayerDims[2], texLayerDims[3], secondTexLayerNum, texId2); + // } for (int i = 0; i < 2; i++) { GLuint currTexId = texId1; @@ -1253,8 +1253,8 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil GLint max_layers, max_size; glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &max_layers); glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, &max_size); - printf("Max layer number: %d\n", max_layers); - printf("Max texture size: %d\n", max_size); + // printf("Max layer number: %d\n", max_layers); + // printf("Max texture size: %d\n", max_size); } int layerNum = firstTexLayerNum; @@ -1442,7 +1442,7 @@ py::list MeshRendererContext::generateArrayTextures(std::vector fil glUniform1f(glGetUniformLocation(shaderProgram, "use_two_light_probe"), (float)m_use_two_light_probe); glUniform1f(glGetUniformLocation(shaderProgram, "blend_highlight"), (float)blend_highlight); - printf("multidrawcount %d\n", multidrawCount); + // printf("multidrawcount %d\n", multidrawCount); glGenBuffers(1, &uboTexColorData); glBindBuffer(GL_UNIFORM_BUFFER, uboTexColorData); @@ -2005,7 +2005,7 @@ void MeshRendererContext::postRenderText() { // Restore previous FBO if one was bound // This allows us to render text both to the screen and to a texture - if (this->restore_prev_FBO) { + if (this->restore_prev_FBO = true) { glBindFramebuffer(GL_FRAMEBUFFER, this->m_prevFBO); } } diff --git a/igibson/render/cpp/mesh_renderer.h b/igibson/render/cpp/mesh_renderer.h index f74b2a1c9..52bfe4471 100644 --- a/igibson/render/cpp/mesh_renderer.h +++ b/igibson/render/cpp/mesh_renderer.h @@ -89,7 +89,7 @@ class MeshRendererContext { cudaGraphicsResource* cuda_res[MAX_NUM_RESOURCES]; #endif - void init() {}; + int init() {}; void release() {}; @@ -254,4 +254,4 @@ class MeshRendererContext { }; -#endif +#endif \ No newline at end of file diff --git a/igibson/render/cpp/tinyobjloader/sample.py b/igibson/render/cpp/tinyobjloader/sample.py index b681e3801..035ba2ae3 100644 --- a/igibson/render/cpp/tinyobjloader/sample.py +++ b/igibson/render/cpp/tinyobjloader/sample.py @@ -1,8 +1,7 @@ import sys - import tinyobjloader -filename = "../models/cornell_box.obj" +filename = "../models/cornell_box.obj"; reader = tinyobjloader.ObjReader() @@ -60,8 +59,8 @@ m.diffuse = [1, 2, 3] print(m.diffuse) - # print(m.shininess) - # print(m.illum) + #print(m.shininess) + #print(m.illum) shapes = reader.GetShapes() print("Num shapes: ", len(shapes)) diff --git a/igibson/render/cpp/vr_mesh_renderer.cpp b/igibson/render/cpp/vr_mesh_renderer.cpp index 214a9830e..88bfd4094 100644 --- a/igibson/render/cpp/vr_mesh_renderer.cpp +++ b/igibson/render/cpp/vr_mesh_renderer.cpp @@ -1,10 +1,7 @@ #include "vr_mesh_renderer.h" - -#ifdef WIN32 #include "SRanipal.h" #include "SRanipal_Eye.h" #include "SRanipal_Enums.h" -#endif #include #include @@ -162,7 +159,7 @@ py::list VRRendererContext::getDeviceCoordinateSystem(char* device) { // TIMELINE: Call after getDataForVRDevice, since this relies on knowing latest HMD transform py::list VRRendererContext::getEyeTrackingData() { py::list eyeData; - #ifdef WIN32 + // Transform data into Gibson coordinate system before returning to user glm::vec3 gibOrigin(vrToGib * glm::vec4(eyeTrackingData.origin, 1.0)); glm::vec3 gibDir(vrToGib * glm::vec4(eyeTrackingData.dir, 1.0)); @@ -188,27 +185,8 @@ py::list VRRendererContext::getEyeTrackingData() { eyeData.append(dir); eyeData.append(eyeTrackingData.leftPupilDiameter); eyeData.append(eyeTrackingData.rightPupilDiameter); - // Return dummy data with false validity if eye tracking is not 
enabled (on non-Windows system) - #else - py::list dummy_origin, dummy_dir; - float dummy_diameter_l, dummy_diameter_r; - eyeData.append(false); - eyeData.append(dummy_origin); - eyeData.append(dummy_dir); - eyeData.append(dummy_diameter_l); - eyeData.append(dummy_diameter_r); - #endif - return eyeData; -} -// Returns whether the current VR system supports eye tracking -bool VRRendererContext::hasEyeTrackingSupport() { - #ifdef WIN32 - return ViveSR::anipal::Eye::IsViveProEye(); - // Non-windows OS always have eye tracking disabled - #else - return false; - #endif + return eyeData; } // Gets the VR offset vector in form x, y, z @@ -224,6 +202,11 @@ py::list VRRendererContext::getVROffset() { return offset; } +// Returns whether the current VR system supports eye tracking +bool VRRendererContext::hasEyeTrackingSupport() { + return ViveSR::anipal::Eye::IsViveProEye(); +} + // Initialize the VR system and compositor // TIMELINE: Call during init of renderer, before height/width are set void VRRendererContext::initVR(bool useEyeTracking) { @@ -255,15 +238,12 @@ void VRRendererContext::initVR(bool useEyeTracking) { // No VR system offset by default vrOffsetVec = glm::vec3(0, 0, 0); - // Only activate eye tracking on Windows - #ifdef WIN32 // Set eye tracking boolean this->useEyeTracking = useEyeTracking; if (useEyeTracking) { shouldShutDownEyeTracking = false; initAnipal(); } - #endif } // Polls for VR events, such as button presses @@ -373,12 +353,10 @@ void VRRendererContext::releaseVR() { vr::VR_Shutdown(); m_pHMD = NULL; - #ifdef WIN32 if (this->useEyeTracking) { this->shouldShutDownEyeTracking = true; eyeTrackingThread->join(); } - #endif } // Sets the offset of the VR headset @@ -432,8 +410,8 @@ void VRRendererContext::updateVRData() { hmdData.index = idx; hmdData.isValidData = true; hmdActualPos = getPositionFromSteamVRMatrix(transformMat); - glm::vec3 steamVrMatrixPos = hmdActualPos + vrOffsetVec; - setSteamVRMatrixPos(steamVrMatrixPos, transformMat); + + setSteamVRMatrixPos(hmdActualPos + vrOffsetVec, transformMat); hmdData.deviceTransform = convertSteamVRMatrixToGlmMat4(transformMat); hmdData.devicePos = getPositionFromSteamVRMatrix(transformMat); @@ -469,8 +447,7 @@ void VRRendererContext::updateVRData() { leftControllerData.isValidData = getControllerDataResult; glm::vec3 leftControllerPos = getPositionFromSteamVRMatrix(transformMat); - glm::vec3 steamVrMatrixPos = leftControllerPos + vrOffsetVec; - setSteamVRMatrixPos(steamVrMatrixPos, transformMat); + setSteamVRMatrixPos(leftControllerPos + vrOffsetVec, transformMat); leftControllerData.deviceTransform = convertSteamVRMatrixToGlmMat4(transformMat); leftControllerData.devicePos = getPositionFromSteamVRMatrix(transformMat); @@ -486,9 +463,7 @@ void VRRendererContext::updateVRData() { rightControllerData.isValidData = getControllerDataResult; glm::vec3 rightControllerPos = getPositionFromSteamVRMatrix(transformMat); - glm::vec3 steamVrMatrixPos = rightControllerPos + vrOffsetVec; - - setSteamVRMatrixPos(steamVrMatrixPos, transformMat); + setSteamVRMatrixPos(rightControllerPos + vrOffsetVec, transformMat); rightControllerData.deviceTransform = convertSteamVRMatrixToGlmMat4(transformMat); rightControllerData.devicePos = getPositionFromSteamVRMatrix(transformMat); @@ -506,8 +481,7 @@ void VRRendererContext::updateVRData() { // Apply VR offset to tracker position glm::vec3 trackerPos = getPositionFromSteamVRMatrix(transformMat); - glm::vec3 steamVrMatrixPos = trackerPos + vrOffsetVec; - setSteamVRMatrixPos(steamVrMatrixPos, 
transformMat); + setSteamVRMatrixPos(trackerPos + vrOffsetVec, transformMat); if (this->trackerNamesToData.find(serial) != this->trackerNamesToData.end()) { this->trackerNamesToData[serial].index = idx; @@ -667,7 +641,6 @@ glm::vec3 VRRendererContext::getVec3ColFromMat4(int col_index, glm::mat4& mat) { return v; } -#ifdef WIN32 // Initializes the SRAnipal runtime, if the user selects this option void VRRendererContext::initAnipal() { if (!ViveSR::anipal::Eye::IsViveProEye()) { @@ -747,7 +720,6 @@ void VRRendererContext::pollAnipal() { } } } -#endif // Print string version of mat4 for debugging purposes void VRRendererContext::printMat4(glm::mat4& m) { @@ -901,8 +873,8 @@ PYBIND11_MODULE(VRRendererContext, m) { pymodule.def("getDataForVRTracker", &VRRendererContext::getDataForVRTracker); pymodule.def("getDeviceCoordinateSystem", &VRRendererContext::getDeviceCoordinateSystem); pymodule.def("getEyeTrackingData", &VRRendererContext::getEyeTrackingData); - pymodule.def("hasEyeTrackingSupport", &VRRendererContext::hasEyeTrackingSupport); pymodule.def("getVROffset", &VRRendererContext::getVROffset); + pymodule.def("hasEyeTrackingSupport", &VRRendererContext::hasEyeTrackingSupport); pymodule.def("initVR", &VRRendererContext::initVR); pymodule.def("pollVREvents", &VRRendererContext::pollVREvents); pymodule.def("postRenderVRForEye", &VRRendererContext::postRenderVRForEye); diff --git a/igibson/render/cpp/vr_mesh_renderer.h b/igibson/render/cpp/vr_mesh_renderer.h index 2e1f7035e..f0ea2761b 100644 --- a/igibson/render/cpp/vr_mesh_renderer.h +++ b/igibson/render/cpp/vr_mesh_renderer.h @@ -2,11 +2,9 @@ #define VR_MESH_RENDERER_HEADER #include "glfw_mesh_renderer.h" -#ifdef WIN32 #include "SRanipal.h" #include "SRanipal_Eye.h" #include "SRanipal_Enums.h" -#endif #include @@ -77,16 +75,12 @@ class VRRendererContext : public GLFWRendererContext { glm::mat4 gibToVr; glm::mat4 vrToGib; - bool useEyeTracking; - bool shouldShutDownEyeTracking; - - #ifdef WIN32 // SRAnipal variables + bool useEyeTracking; std::thread* eyeTrackingThread; ViveSR::anipal::Eye::EyeData eyeData; - #endif - int result; + bool shouldShutDownEyeTracking; // Struct storing eye data for SR anipal - we only return origin and direction in world space // As most users will want to use this ray to query intersection or something similar diff --git a/igibson/render/mesh_renderer/get_available_devices.py b/igibson/render/mesh_renderer/get_available_devices.py index f5410797e..c50ae121f 100644 --- a/igibson/render/mesh_renderer/get_available_devices.py +++ b/igibson/render/mesh_renderer/get_available_devices.py @@ -16,28 +16,22 @@ def get_available_devices(): try: num_devices = int(subprocess.check_output(["{}/query_devices".format(executable_path)])) except subprocess.CalledProcessError as e: - return [0], {0: {}} + return [0] FNULL = open(os.devnull, "w") - device_list = [] - device_info = {} + available_devices = [] for i in range(num_devices): try: if b"NVIDIA" in subprocess.check_output(["{}/test_device".format(executable_path), str(i)], stderr=FNULL): - device_list.append(i) - device_info[i] = {"device_type": "nvidia"} - logging.info("Device {} is available for rendering".format(i)) - elif b"Intel" in subprocess.check_output(["{}/test_device".format(executable_path), str(i)], stderr=FNULL): - device_list.append(i) - device_info[i] = {"device_type": "intel"} + available_devices.append(i) logging.info("Device {} is available for rendering".format(i)) except subprocess.CalledProcessError as e: logging.info(e) logging.info("Device {} is not 
available for rendering".format(i)) FNULL.close() - return device_list, device_info + return available_devices def get_cuda_device(minor_idx): @@ -69,6 +63,6 @@ def get_cuda_device(minor_idx): if __name__ == "__main__": - graphics_devices, device_info = get_available_devices() + graphics_devices = get_available_devices() logging.info("Graphics Devices: {}".format(graphics_devices)) logging.info("Graphics Device Ids: {}".format([get_cuda_device(item) for item in graphics_devices])) diff --git a/igibson/render/mesh_renderer/instances.py b/igibson/render/mesh_renderer/instances.py index d36189f52..cecf7e7fe 100644 --- a/igibson/render/mesh_renderer/instances.py +++ b/igibson/render/mesh_renderer/instances.py @@ -18,12 +18,11 @@ def __init__( id, link_ids, pybullet_uuid, - ig_object, class_id, poses_trans, poses_rot, dynamic, - softbody, + robot=None, use_pbr=True, use_pbr_mapping=True, shadow_caster=True, @@ -37,7 +36,7 @@ def __init__( :param poses_trans: initial translations for each visual object :param poses_rot: initial rotation matrix for each visual object :param dynamic: whether the instance group is dynamic - :param softbody: whether the instance group is for a soft body + :param robot: The robot associated with this InstanceGroup :param use_pbr: whether to use PBR :param use_pbr_mapping: whether to use PBR mapping :param shadow_caster: whether to cast shadow @@ -48,16 +47,15 @@ def __init__( self.id = id self.link_ids = link_ids self.class_id = class_id + self.robot = robot if len(objects) > 0: self.renderer = objects[0].renderer else: self.renderer = None self.pybullet_uuid = pybullet_uuid - self.ig_object = ig_object self.dynamic = dynamic self.tf_tree = None - self.softbody = softbody self.use_pbr = use_pbr self.use_pbr_mapping = use_pbr_mapping self.shadow_caster = shadow_caster @@ -101,33 +99,6 @@ def render(self, shadow_pass=0): for i, visual_object in enumerate(self.objects): for object_idx in visual_object.VAO_ids: - if self.softbody: - # construct new vertex position into shape format - vertices = p.getMeshData(self.pybullet_uuid)[1] - vertices_flattened = [item for sublist in vertices for item in sublist] - vertex_position = np.array(vertices_flattened).reshape((len(vertices_flattened) // 3, 3)) - shape = self.renderer.shapes[object_idx] - n_indices = len(shape.mesh.indices) - np_indices = shape.mesh.numpy_indices().reshape((n_indices, 3)) - shape_vertex_index = np_indices[:, 0] - shape_vertex = vertex_position[shape_vertex_index] - - # update new vertex position in buffer data - new_data = self.renderer.vertex_data[object_idx] - new_data[:, 0 : shape_vertex.shape[1]] = shape_vertex - new_data = new_data.astype(np.float32) - - # transform and rotation already included in mesh data - self.pose_trans = np.eye(4) - self.pose_rot = np.eye(4) - self.last_trans = np.eye(4) - self.last_rot = np.eye(4) - - # update buffer data into VBO - self.renderer.r.render_softbody_instance( - self.renderer.VAOs[object_idx], self.renderer.VBOs[object_idx], new_data - ) - self.renderer.r.init_pos_instance( self.renderer.shaderProgram, self.poses_trans[i], @@ -261,3 +232,239 @@ def __str__(self): def __repr__(self): return self.__str__() + + +class Robot(InstanceGroup): + """ + A specfial type of InstanceGroup used for Robots + """ + + def __init__(self, *args, **kwargs): + super(Robot, self).__init__(*args, **kwargs) + + def __str__(self): + return "Robot({}) -> Objects({})".format(self.id, ",".join([str(object.id) for object in self.objects])) + + +class Instance(object): + """ + Instance is 
one instance of a visual object. + One visual object can have multiple instances to save memory. + """ + + def __init__( + self, + object, + id, + pybullet_uuid, + class_id, + pose_trans, + pose_rot, + dynamic, + softbody, + use_pbr=True, + use_pbr_mapping=True, + shadow_caster=True, + parent_body=None, + ): + """ + :param object: visual object + :param id: id of this instance_group + :param pybullet_uuid: body id in pybullet + :param class_id: class_id to render semantics + :param pose_trans: initial translations for the visual object + :param pose_rot: initial rotation matrix for the visual object + :param dynamic: whether the instance is dynamic + :param softbody: whether the instance is soft body + :param use_pbr: whether to use PBR + :param use_pbr_mapping: whether to use PBR mapping + :param shadow_caster: whether to cast shadow + :param parent_body: parent body name of current xml element (MuJoCo XML) + """ + self.object = object + self.pose_trans = pose_trans + self.pose_rot = pose_rot + self.id = id + self.class_id = class_id + self.renderer = object.renderer + self.pybullet_uuid = pybullet_uuid + self.dynamic = dynamic + self.softbody = softbody + self.use_pbr = use_pbr + self.use_pbr_mapping = use_pbr_mapping + self.shadow_caster = shadow_caster + self.roughness = 1 + self.metalness = 0 + # Determines whether object will be rendered + self.hidden = False + # Indices into optimized buffers such as color information and transformation buffer + # These values are used to set buffer information during simulation + self.or_buffer_indices = None + self.last_trans = np.copy(pose_trans) + self.last_rot = np.copy(pose_rot) + self.parent_body = parent_body + self.highlight = False + + def set_highlight(self, highlight): + self.highlight = highlight + + def render(self, shadow_pass=0): + """ + Render this instance + shadow_pass = 0: normal rendering mode, disable shadow + shadow_pass = 1: enable_shadow, rendering depth map from light space + shadow_pass = 2: use rendered depth map to calculate shadow + + :param shadow_pass: shadow pass mode + """ + if self.renderer is None: + return + + # softbody: reload vertex position + if self.softbody: + # construct new vertex position into shape format + object_idx = self.object.VAO_ids[0] + vertices = p.getMeshData(self.pybullet_uuid)[1] + vertices_flattened = [item for sublist in vertices for item in sublist] + vertex_position = np.array(vertices_flattened).reshape((len(vertices_flattened) // 3, 3)) + shape = self.renderer.shapes[object_idx] + n_indices = len(shape.mesh.indices) + np_indices = shape.mesh.numpy_indices().reshape((n_indices, 3)) + shape_vertex_index = np_indices[:, 0] + shape_vertex = vertex_position[shape_vertex_index] + + # update new vertex position in buffer data + new_data = self.renderer.vertex_data[object_idx] + new_data[:, 0 : shape_vertex.shape[1]] = shape_vertex + new_data = new_data.astype(np.float32) + + # transform and rotation already included in mesh data + self.pose_trans = np.eye(4) + self.pose_rot = np.eye(4) + self.last_trans = np.eye(4) + self.last_rot = np.eye(4) + + # update buffer data into VBO + self.renderer.r.render_softbody_instance( + self.renderer.VAOs[object_idx], self.renderer.VBOs[object_idx], new_data + ) + + self.renderer.r.initvar( + self.renderer.shaderProgram, + self.renderer.V, + self.renderer.last_V, + self.renderer.lightV, + shadow_pass, + self.renderer.P, + self.renderer.lightP, + self.renderer.camera, + self.renderer.lightpos, + self.renderer.lightcolor, + ) + + 
self.renderer.r.init_pos_instance( + self.renderer.shaderProgram, self.pose_trans, self.pose_rot, self.last_trans, self.last_rot + ) + + for object_idx in self.object.VAO_ids: + current_material = self.renderer.material_idx_to_material_instance_mapping[ + self.renderer.shape_material_idx[object_idx] + ] + self.renderer.r.init_material_instance( + self.renderer.shaderProgram, + float(self.class_id) / MAX_CLASS_COUNT, + float(self.id) / MAX_INSTANCE_COUNT, + current_material.kd, + float(current_material.is_texture()), + float(self.use_pbr), + float(self.use_pbr_mapping), + float(self.metalness), + float(self.roughness), + current_material.transform_param, + ) + try: + + texture_id = current_material.texture_id + metallic_texture_id = current_material.metallic_texture_id + roughness_texture_id = current_material.roughness_texture_id + normal_texture_id = current_material.normal_texture_id + + if texture_id is None: + texture_id = -1 + if metallic_texture_id is None: + metallic_texture_id = -1 + if roughness_texture_id is None: + roughness_texture_id = -1 + if normal_texture_id is None: + normal_texture_id = -1 + + if self.renderer.msaa: + buffer = self.renderer.fbo_ms + else: + buffer = self.renderer.fbo + + self.renderer.r.draw_elements_instance( + self.renderer.material_idx_to_material_instance_mapping[ + self.renderer.shape_material_idx[object_idx] + ].is_texture(), + texture_id, + metallic_texture_id, + roughness_texture_id, + normal_texture_id, + self.renderer.depth_tex_shadow, + self.renderer.VAOs[object_idx], + self.renderer.faces[object_idx].size, + self.renderer.faces[object_idx], + buffer, + ) + finally: + self.renderer.r.cglBindVertexArray(0) + + self.renderer.r.cglUseProgram(0) + + def get_pose_in_camera(self): + """ + Get instance pose in camera reference frame + """ + mat = self.renderer.V.dot(self.pose_trans.T).dot(self.pose_rot).T + pose = np.concatenate([mat2xyz(mat), safemat2quat(mat[:3, :3].T)]) + return pose + + def set_position(self, pos): + """ + Set position + + :param pos: position + """ + self.last_trans = np.copy(self.pose_trans) + self.pose_trans = np.ascontiguousarray(xyz2mat(pos)) + + def set_rotation(self, rot): + """ + Set orientation + + :param rot: rotation matrix + """ + self.last_rot = np.copy(self.pose_rot) + self.pose_rot = rot + + def dump(self): + """ + Dump vertex and face information + """ + vertices_info = [] + faces_info = [] + for vertex_data_index, face_index in zip(self.object.vertex_data_indices, self.object.face_indices): + vertices_info.append( + transform_vertex( + self.renderer.vertex_data[vertex_data_index], pose_rot=self.pose_rot, pose_trans=self.pose_trans + ) + ) + faces_info.append(self.renderer.faces[face_index]) + return vertices_info, faces_info + + def __str__(self): + return "Instance({}) -> Object({})".format(self.id, self.object.id) + + def __repr__(self): + return self.__str__() diff --git a/igibson/render/mesh_renderer/libcryptopp.8.6.dylib b/igibson/render/mesh_renderer/libcryptopp.8.6.dylib new file mode 100755 index 000000000..737bf283c Binary files /dev/null and b/igibson/render/mesh_renderer/libcryptopp.8.6.dylib differ diff --git a/igibson/render/mesh_renderer/libcryptopp.dylib b/igibson/render/mesh_renderer/libcryptopp.dylib new file mode 120000 index 000000000..e2b0a542d --- /dev/null +++ b/igibson/render/mesh_renderer/libcryptopp.dylib @@ -0,0 +1 @@ +libcryptopp.8.6.dylib \ No newline at end of file diff --git a/igibson/render/mesh_renderer/mesh_renderer_cpu.py b/igibson/render/mesh_renderer/mesh_renderer_cpu.py index 
f63dfdd58..e7e894431 100644 --- a/igibson/render/mesh_renderer/mesh_renderer_cpu.py +++ b/igibson/render/mesh_renderer/mesh_renderer_cpu.py @@ -12,18 +12,16 @@ import igibson.render.mesh_renderer as mesh_renderer from igibson.render.mesh_renderer import tinyobjloader from igibson.render.mesh_renderer.get_available_devices import get_available_devices -from igibson.render.mesh_renderer.instances import InstanceGroup -from igibson.render.mesh_renderer.materials import Material, ProceduralMaterial +from igibson.render.mesh_renderer.instances import Instance, InstanceGroup, Robot +from igibson.render.mesh_renderer.materials import Material, ProceduralMaterial, RandomizedMaterial from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings from igibson.render.mesh_renderer.text import Text, TextManager from igibson.render.mesh_renderer.visual_object import VisualObject from igibson.robots.behavior_robot import BehaviorRobot -from igibson.robots.robot_base import BaseRobot from igibson.utils.constants import AVAILABLE_MODALITIES, MAX_CLASS_COUNT, MAX_INSTANCE_COUNT, ShadowPass from igibson.utils.mesh_util import lookat, mat2xyz, ortho, perspective, quat2rotmat, safemat2quat, xyz2mat, xyzw2wxyz Image.MAX_IMAGE_PIXELS = None -NO_MATERIAL_DEFINED_IN_SHAPE_AND_NO_OVERWRITE_SUPPLIED = -1 class MeshRenderer(object): @@ -47,8 +45,8 @@ def __init__( :param height: width of the renderer output :param vertical_fov: vertical field of view for the renderer :param device_idx: which GPU to run the renderer on - :param rendering_settings: rendering settings - :param simulator: simulator object + :param render_settings: rendering settings + :param simulator: Simulator object. """ self.simulator = simulator self.rendering_settings = rendering_settings @@ -69,7 +67,6 @@ def __init__( self.height = height self.faces = [] self.instances = [] - self.update_instance_id_to_pb_id_map() self.fisheye = rendering_settings.use_fisheye self.optimized = rendering_settings.optimized self.texture_files = {} @@ -85,13 +82,47 @@ def __init__( self.text_manager = TextManager(self) self.texts = [] - self.msaa = rendering_settings.msaa + device = None + """ + device_idx is the major id + device is the minor id + you can get it from nvidia-smi -a + + The minor number for the device is such that the Nvidia device node file for each GPU will have the form + /dev/nvidia[minor number]. Available only on Linux platform. + + TODO: add device management for windows platform. + """ + + if os.environ.get("GIBSON_DEVICE_ID", None): + device = int(os.environ.get("GIBSON_DEVICE_ID")) + logging.info( + "GIBSON_DEVICE_ID environment variable has been manually set. 
" + "Using device {} for rendering".format(device) + ) + else: + if self.platform != "Windows": + available_devices = get_available_devices() + if device_idx < len(available_devices): + device = available_devices[device_idx] + logging.info("Using device {} for rendering".format(device)) + else: + logging.info("Device index is larger than number of devices, falling back to use 0") + logging.info( + "If you have trouble using EGL, please visit our trouble shooting guide", + "at http://svl.stanford.edu/igibson/docs/issues.html", + ) + + device = 0 + self.device_idx = device_idx + self.device_minor = device + self.msaa = rendering_settings.msaa + if self.platform == "Darwin" and self.optimized: + logging.error("Optimized renderer is not supported on Mac") + exit() if self.platform == "Darwin": - if self.optimized: - logging.error("Optimized renderer is not supported on Mac") - exit() - from igibson.render.mesh_renderer import GLFWRendererContext # type: ignore + from igibson.render.mesh_renderer import GLFWRendererContext self.r = GLFWRendererContext.GLFWRendererContext( width, @@ -101,9 +132,8 @@ def __init__( self.rendering_settings.show_glfw_window, rendering_settings.fullscreen, ) - - elif self.platform == "Windows" or self.__class__.__name__ == "MeshRendererVR": - from igibson.render.mesh_renderer import VRRendererContext # type: ignore + elif self.platform == "Windows": + from igibson.render.mesh_renderer import VRRendererContext self.r = VRRendererContext.VRRendererContext( width, @@ -113,47 +143,10 @@ def __init__( self.rendering_settings.show_glfw_window, rendering_settings.fullscreen, ) - - elif self.platform == "Linux": + else: from igibson.render.mesh_renderer import EGLRendererContext - """ - device_idx is the major id - device is the minor id - you can get it from nvidia-smi -a - - The minor number for the device is such that the Nvidia device node file for each GPU will have the form - /dev/nvidia[minor number]. Available only on Linux platform. - - """ - - device = os.environ.get("GIBSON_DEVICE_ID", None) - if device: - device = int(device) - logging.info( - "GIBSON_DEVICE_ID environment variable has been manually set. 
" - "Using device {} for rendering".format(device) - ) - else: - available_devices, _ = get_available_devices() - if device_idx < len(available_devices): - device = available_devices[device_idx] - logging.info("Using device {} for rendering".format(device)) - else: - logging.info("Device index is larger than number of devices, falling back to use 0") - logging.info( - "If you have trouble using EGL, please visit our trouble shooting guide" - "at http://svl.stanford.edu/igibson/docs/issues.html", - ) - - device = 0 - - self.device_idx = device_idx - self.device_minor = device - self.r = EGLRendererContext.EGLRendererContext(width, height, device) - else: - raise Exception("Unsupported platform and renderer combination") self.r.init() @@ -167,30 +160,91 @@ def __init__( logging.debug("Is using fisheye camera: {}".format(self.fisheye)) - if self.rendering_settings.glsl_version_override: - glsl_version = str(self.rendering_settings.glsl_version_override) - shader_available = glsl_version in ["450", "460"] - assert shader_available, "Error: only GLSL version 450 and 460 shaders are supported" - else: - glsl_version = "460" - if self.fisheye: logging.error("Fisheye is currently not supported.") exit(1) else: if self.platform == "Darwin": - self.shaderProgram = self.get_shader_program("410", "vert.shader", "frag.shader") - self.textShaderProgram = self.get_shader_program("410", "text_vert.shader", "text_frag.shader") + self.shaderProgram = self.r.compile_shader_meshrenderer( + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "vert.shader") + ).readlines() + ), + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "frag.shader") + ).readlines() + ), + ) + self.textShaderProgram = self.r.compile_shader_meshrenderer( + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "text_vert.shader") + ).readlines() + ), + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "text_frag.shader") + ).readlines() + ), + ) else: if self.optimized: - self.shaderProgram = self.get_shader_program( - glsl_version, "optimized_vert.shader", "optimized_frag.shader" + self.shaderProgram = self.r.compile_shader_meshrenderer( + "".join( + open( + os.path.join( + os.path.dirname(mesh_renderer.__file__), "shaders", "450", "optimized_vert.shader" + ) + ).readlines() + ), + "".join( + open( + os.path.join( + os.path.dirname(mesh_renderer.__file__), "shaders", "450", "optimized_frag.shader" + ) + ).readlines() + ), ) else: - self.shaderProgram = self.get_shader_program(glsl_version, "vert.shader", "frag.shader") - self.textShaderProgram = self.get_shader_program(glsl_version, "text_vert.shader", "text_frag.shader") + self.shaderProgram = self.r.compile_shader_meshrenderer( + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450", "vert.shader") + ).readlines() + ), + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450", "frag.shader") + ).readlines() + ), + ) + self.textShaderProgram = self.r.compile_shader_meshrenderer( + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450", "text_vert.shader") + ).readlines() + ), + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450", "text_frag.shader") + ).readlines() + ), + ) - self.skyboxShaderProgram = self.get_shader_program("410", "skybox_vs.glsl", "skybox_fs.glsl") + 
self.skyboxShaderProgram = self.r.compile_shader_meshrenderer( + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "skybox_vs.glsl") + ).readlines() + ), + "".join( + open( + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "410", "skybox_fs.glsl") + ).readlines() + ), + ) # default light looking down and tilted self.set_light_position_direction([0, 0, 2], [0, 0.5, 0]) @@ -226,40 +280,16 @@ def __init__( self.skybox_size = rendering_settings.skybox_size if not self.platform == "Darwin" and rendering_settings.enable_pbr: - self.setup_pbr(glsl_version) + self.setup_pbr() self.setup_lidar_param() # Set up text FBO self.text_manager.gen_text_fbo() - def get_shader_program(self, glsl_version, vertex_source, fragment_source): - """ - Get shader program. - - :param glsl_version: GLSL version - :param vertex_source: vertex shader source - :param fragment_source: fragment shader source - :return: a program object to which vertex shader and fragment shader are attached - """ - return self.r.compile_shader_meshrenderer( - "".join( - open( - os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", glsl_version, vertex_source) - ).readlines() - ), - "".join( - open( - os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", glsl_version, fragment_source) - ).readlines() - ), - ) - - def setup_pbr(self, glsl_version): + def setup_pbr(self): """ - Set up physics-based rendering. - - :param glsl_version: GLSL version + Set up physics-based rendering """ if ( os.path.exists(self.rendering_settings.env_texture_filename) @@ -267,7 +297,7 @@ def setup_pbr(self, glsl_version): or os.path.exists(self.rendering_settings.env_texture_filename3) ): self.r.setup_pbr( - os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", glsl_version), + os.path.join(os.path.dirname(mesh_renderer.__file__), "shaders", "450"), self.rendering_settings.env_texture_filename, self.rendering_settings.env_texture_filename2, self.rendering_settings.env_texture_filename3, @@ -281,7 +311,7 @@ def setup_pbr(self, glsl_version): def set_light_position_direction(self, position, target): """ - Set light position and orientation. + Set light position and orientation :param position: light position :param target: light target @@ -291,7 +321,7 @@ def set_light_position_direction(self, position, target): def setup_framebuffer(self): """ - Set up framebuffers for the renderer. + Set up framebuffers for the renderer """ [ self.fbo, @@ -320,13 +350,12 @@ def setup_framebuffer(self): self.depth_tex_shadow = self.r.allocateTexture(self.width, self.height) - def load_texture_file(self, tex_filename, texture_scale): + def load_texture_file(self, tex_filename): """ - Load the texture file into the renderer. 
+ Load the texture file into the renderer :param tex_filename: texture file filename - :param texture_scale: a file-specific texture scale to be multiplied with the global scale in the settings - :return: texture id of this texture in the renderer + :return texture_id: texture id of this texture in the renderer """ # if texture is None or does not exist, return None if tex_filename is None or (not os.path.isfile(tex_filename)): @@ -340,36 +369,32 @@ def load_texture_file(self, tex_filename, texture_scale): # assume optimized renderer will have texture id starting from 0 texture_id = len(self.texture_files) else: - texture_id = self.r.loadTexture( - tex_filename, texture_scale * self.rendering_settings.texture_scale, igibson.key_path - ) + texture_id = self.r.loadTexture(tex_filename, self.rendering_settings.texture_scale, igibson.key_path) self.textures.append(texture_id) self.texture_files[tex_filename] = texture_id return texture_id - def load_procedural_material(self, material, texture_scale): + def load_procedural_material(self, material): material.lookup_or_create_transformed_texture() has_encrypted_texture = os.path.exists(os.path.join(material.material_folder, "DIFFUSE.encrypted.png")) suffix = ".encrypted.png" if has_encrypted_texture else ".png" - material.texture_id = self.load_texture_file( - os.path.join(material.material_folder, "DIFFUSE{}".format(suffix)), texture_scale - ) + material.texture_id = self.load_texture_file(os.path.join(material.material_folder, "DIFFUSE{}".format(suffix))) material.metallic_texture_id = self.load_texture_file( - os.path.join(material.material_folder, "METALLIC{}".format(suffix)), texture_scale + os.path.join(material.material_folder, "METALLIC{}".format(suffix)) ) material.roughness_texture_id = self.load_texture_file( - os.path.join(material.material_folder, "ROUGHNESS{}".format(suffix)), texture_scale + os.path.join(material.material_folder, "ROUGHNESS{}".format(suffix)) ) material.normal_texture_id = self.load_texture_file( - os.path.join(material.material_folder, "NORMAL{}".format(suffix)), texture_scale + os.path.join(material.material_folder, "NORMAL{}".format(suffix)) ) for state in material.states: - transformed_diffuse_id = self.load_texture_file(material.texture_filenames[state], texture_scale) + transformed_diffuse_id = self.load_texture_file(material.texture_filenames[state]) material.texture_ids[state] = transformed_diffuse_id material.default_texture_id = material.texture_id - def load_randomized_material(self, material, texture_scale): + def load_randomized_material(self, material): """ Load all the texture files in the RandomizedMaterial. Populate material_ids with the texture id assigned by the renderer. 
@@ -386,7 +411,7 @@ def load_randomized_material(self, material, texture_scale): for material_instance in material.material_files[material_class]: material_id_instance = {} for key in material_instance: - material_id_instance[key] = self.load_texture_file(material_instance[key], texture_scale) + material_id_instance[key] = self.load_texture_file(material_instance[key]) material.material_ids[material_class].append(material_id_instance) material.randomize() @@ -398,6 +423,7 @@ def load_object( transform_pos=None, input_kd=None, texture_scale=1.0, + load_texture=True, overwrite_material=None, ): """ @@ -408,18 +434,19 @@ def load_object( :param transform_orn: rotation quaternion, convention xyzw :param transform_pos: translation for loading, it is a list of length 3 :param input_kd: if loading material fails, use this default material. input_kd should be a list of length 3 - :param texture_scale: texture scale for the object, downsample to save memory + :param texture_scale: texture scale for the object, downsample to save memory. + :param load_texture: load texture or not :param overwrite_material: whether to overwrite the default Material (usually with a RandomizedMaterial for material randomization) :return: VAO_ids """ if self.optimization_process_executed and self.optimized: logging.error( - "Using optimized renderer and optimization process is already excuted, cannot add new objects" + "Using optimized renderer and optimization process is already excuted, cannot add new " "objects" ) return reader = tinyobjloader.ObjReader() - logging.info("Loading {}".format(obj_path)) + # logging.info("Loading {}".format(obj_path)) if obj_path.endswith("encrypted.obj"): if not os.path.exists(igibson.key_path): raise FileNotFoundError( @@ -460,32 +487,24 @@ def load_object( # set the default values of variable before being modified later. num_existing_mats = len(self.material_idx_to_material_instance_mapping) # Number of current Material elements + num_added_materials = len(materials) - # No MTL is supplied, or MTL is empty - if len(materials) == 0: - # Case when mesh obj is without mtl file but overwrite material is specified. 
- if overwrite_material is not None: - self.material_idx_to_material_instance_mapping[num_existing_mats] = overwrite_material - num_added_materials = 1 - else: - num_added_materials = 0 - else: + if num_added_materials > 0: # Deparse the materials in the obj file by loading textures into the renderer's memory and creating a # Material element for them - num_added_materials = len(materials) for i, item in enumerate(materials): if overwrite_material is not None: + if isinstance(overwrite_material, RandomizedMaterial): + self.load_randomized_material(overwrite_material) + elif isinstance(overwrite_material, ProceduralMaterial): + self.load_procedural_material(overwrite_material) material = overwrite_material - elif item.diffuse_texname != "" and self.rendering_settings.load_textures: + elif item.diffuse_texname != "" and load_texture: obj_dir = os.path.dirname(obj_path) - texture = self.load_texture_file(os.path.join(obj_dir, item.diffuse_texname), texture_scale) - texture_metallic = self.load_texture_file( - os.path.join(obj_dir, item.metallic_texname), texture_scale - ) - texture_roughness = self.load_texture_file( - os.path.join(obj_dir, item.roughness_texname), texture_scale - ) - texture_normal = self.load_texture_file(os.path.join(obj_dir, item.bump_texname), texture_scale) + texture = self.load_texture_file(os.path.join(obj_dir, item.diffuse_texname)) + texture_metallic = self.load_texture_file(os.path.join(obj_dir, item.metallic_texname)) + texture_roughness = self.load_texture_file(os.path.join(obj_dir, item.roughness_texname)) + texture_normal = self.load_texture_file(os.path.join(obj_dir, item.bump_texname)) material = Material( "texture", texture_id=texture, @@ -501,19 +520,22 @@ def load_object( else: material = Material("color", kd=item.diffuse) self.material_idx_to_material_instance_mapping[num_existing_mats + i] = material + else: + # Case when mesh obj is without mtl file but overwrite material is specified. + if overwrite_material is not None: + self.material_idx_to_material_instance_mapping[num_existing_mats] = overwrite_material + num_added_materials = 1 - # material index = num_existing_mats ... num_existing_mats + num_added_materials - 1 (inclusive) are using - # materials from mesh or from overwrite_material + # material index = num_existing_mats ... 
num_existing_mats + num_added_materials - 1 are using materials from mesh or + # from overwrite_material # material index = num_existing_mats + num_added_materials is a fail-safe default material - idx_of_failsafe_material = num_existing_mats + num_added_materials - if input_kd is not None: # append the default material in the end, in case material loading fails - self.material_idx_to_material_instance_mapping[idx_of_failsafe_material] = Material( + self.material_idx_to_material_instance_mapping[num_existing_mats + num_added_materials] = Material( "color", kd=input_kd, texture_id=-1 ) else: - self.material_idx_to_material_instance_mapping[idx_of_failsafe_material] = Material( + self.material_idx_to_material_instance_mapping[num_existing_mats + num_added_materials] = Material( "color", kd=[0.5, 0.5, 0.5], texture_id=-1 ) @@ -525,18 +547,13 @@ def load_object( for shape in shapes: logging.debug("Shape name: {}".format(shape.name)) - if len(shape.mesh.material_ids) == 0 or shape.mesh.material_ids[0] == -1: - # material not found, or invalid material, as defined here - # https://github.com/tinyobjloader/tinyobjloader/blob/master/tiny_obj_loader.h#L2997 + if len(shape.mesh.material_ids) == 0: if overwrite_material is not None: material_id = 0 - # shape don't have material id, use material 0, which is the overwrite material else: - material_id = NO_MATERIAL_DEFINED_IN_SHAPE_AND_NO_OVERWRITE_SUPPLIED - # if no material and no overwrite material is supplied + material_id = -1 # if no material and no overwrite material is supplied else: material_id = shape.mesh.material_ids[0] - # assumption: each shape only have one material logging.debug("material_id = {}".format(material_id)) logging.debug("num_indices = {}".format(len(shape.mesh.indices))) @@ -578,13 +595,13 @@ def load_object( # Translate the shape after they are scaled shape_vertex += np.array(transform_pos) - # Compute tangents and bitangents for tangent space normal mapping. v0 = shape_vertex[0::3, :] v1 = shape_vertex[1::3, :] v2 = shape_vertex[2::3, :] uv0 = shape_texcoord[0::3, :] uv1 = shape_texcoord[1::3, :] uv2 = shape_texcoord[2::3, :] + delta_pos1 = v1 - v0 delta_pos2 = v2 - v0 delta_uv1 = uv1 - uv0 @@ -592,10 +609,8 @@ def load_object( r = 1.0 / (delta_uv1[:, 0] * delta_uv2[:, 1] - delta_uv1[:, 1] * delta_uv2[:, 0]) tangent = (delta_pos1 * delta_uv2[:, 1][:, None] - delta_pos2 * delta_uv1[:, 1][:, None]) * r[:, None] bitangent = (delta_pos2 * delta_uv1[:, 0][:, None] - delta_pos1 * delta_uv2[:, 0][:, None]) * r[:, None] - # Set the same tangent and bitangent for all three vertices of the triangle. 
- tangent = tangent.repeat(3, axis=0) bitangent = bitangent.repeat(3, axis=0) - + tangent = tangent.repeat(3, axis=0) vertices = np.concatenate([shape_vertex, shape_normal, shape_texcoord, tangent, bitangent], axis=-1) faces = np.array(range(len(vertices))).reshape((len(vertices) // 3, 3)) vertexData = vertices.astype(np.float32) @@ -609,9 +624,8 @@ def load_object( self.vertex_data.append(vertexData) self.shapes.append(shape) # if material loading fails, use the default material - if material_id == NO_MATERIAL_DEFINED_IN_SHAPE_AND_NO_OVERWRITE_SUPPLIED: - # use fall back material - self.shape_material_idx.append(idx_of_failsafe_material) + if material_id == -1: + self.shape_material_idx.append(num_added_materials + num_existing_mats) else: self.shape_material_idx.append(material_id + num_existing_mats) @@ -629,32 +643,85 @@ def load_object( self.visual_objects.append(new_obj) return VAO_ids + def add_instance( + self, + object_id, + pybullet_uuid=None, + class_id=0, + pose_trans=np.eye(4), + pose_rot=np.eye(4), + dynamic=False, + softbody=False, + use_pbr=True, + use_pbr_mapping=True, + shadow_caster=True, + parent_body=None, + ): + """ + Create instance for a visual object and link it to pybullet + + :param object_id: id of visual object + :param pybullet_uuid: body id in pybullet + :param class_id: class_id to render semantics + :param pose_trans: initial translations for the visual object + :param pose_rot: initial rotation matrix for the visual object + :param dynamic: whether the instance is dynamic + :param softbody: whether the instance is soft body + :param use_pbr: whether to use PBR + :param use_pbr_mapping: whether to use PBR mapping + :param shadow_caster: whether to cast shadow + :param parent_body: parent body name of current xml element (MuJoCo XML) + """ + if self.optimization_process_executed and self.optimized: + logging.error( + "Using optimized renderer and optimization process is already excuted, cannot add new " "objects" + ) + return + + use_pbr = use_pbr and self.rendering_settings.enable_pbr + use_pbr_mapping = use_pbr_mapping and self.rendering_settings.enable_pbr + + instance = Instance( + self.visual_objects[object_id], + id=len(self.instances), + pybullet_uuid=pybullet_uuid, + class_id=class_id, + pose_trans=pose_trans, + pose_rot=pose_rot, + dynamic=dynamic, + softbody=softbody, + use_pbr=use_pbr, + use_pbr_mapping=use_pbr_mapping, + shadow_caster=shadow_caster, + parent_body=parent_body, + ) + self.instances.append(instance) + def add_instance_group( self, object_ids, - link_ids=[-1], - poses_trans=[np.eye(4)], - poses_rot=[np.eye(4)], + link_ids, + poses_trans, + poses_rot, pybullet_uuid=None, - ig_object=None, class_id=0, dynamic=False, - softbody=False, + robot=None, use_pbr=True, use_pbr_mapping=True, shadow_caster=True, ): """ - Create an instance group for a list of visual objects and link it to pybullet. 
+ Create an instance group for a list of visual objects and link it to pybullet :param object_ids: object ids of the visual objects :param link_ids: link_ids in pybullet :param poses_trans: initial translations for each visual object :param poses_rot: initial rotation matrix for each visual object :param pybullet_uuid: body id in pybullet - :param ig_object: iGibson object associated with this instance group :param class_id: class_id to render semantics :param dynamic: whether the instance group is dynamic + :param robot: The robot associated with this InstanceGroup :param use_pbr: whether to use PBR :param use_pbr_mapping: whether to use PBR mapping :param shadow_caster: whether to cast shadow @@ -674,18 +741,53 @@ def add_instance_group( id=len(self.instances), link_ids=link_ids, pybullet_uuid=pybullet_uuid, - ig_object=ig_object, class_id=class_id, poses_trans=poses_trans, poses_rot=poses_rot, dynamic=dynamic, - softbody=softbody, + robot=robot, use_pbr=use_pbr, use_pbr_mapping=use_pbr_mapping, shadow_caster=shadow_caster, ) self.instances.append(instance_group) - self.update_instance_id_to_pb_id_map() + + def add_robot( + self, object_ids, link_ids, poses_trans, poses_rot, pybullet_uuid=None, class_id=0, dynamic=False, robot=None + ): + """ + Create an instance group (a robot) for a list of visual objects and link it to pybullet + + :param object_ids: object ids of the visual objects + :param link_ids: link_ids in pybullet + :param poses_trans: initial translations for each visual object + :param poses_rot: initial rotation matrix for each visual object + :param pybullet_uuid: body id in pybullet + :param class_id: class_id to render semantics + :param dynamic: whether the instance group is dynamic + :param robot: The robot associated with this InstanceGroup + """ + + if self.optimization_process_executed and self.optimized: + logging.error( + "Using optimized renderer and optimization process is already excuted, cannot add new " "objects" + ) + return + + robot = Robot( + [self.visual_objects[object_id] for object_id in object_ids], + id=len(self.instances), + link_ids=link_ids, + pybullet_uuid=pybullet_uuid, + class_id=class_id, + poses_trans=poses_trans, + poses_rot=poses_rot, + dynamic=dynamic, + robot=robot, + use_pbr=False, + use_pbr_mapping=False, + ) + self.instances.append(robot) def add_text( self, @@ -703,7 +805,6 @@ def add_text( """ Creates a Text object with the given parameters. Returns the text object to the caller, so various settings can be changed - eg. text content, position, scale, etc. - :param text_data: starting text to display (can be changed at a later time by set_text) :param font_name: name of font to render - same as font folder in iGibson assets :param font_style: style of font - one of [regular, italic, bold] @@ -734,7 +835,7 @@ def add_text( def set_camera(self, camera, target, up, cache=False): """ - Set camera pose. + Set camera pose :param camera: camera position :param target: camera target @@ -756,7 +857,7 @@ def set_camera(self, camera, target, up, cache=False): def set_z_near_z_far(self, znear, zfar): """ - Set z limit for camera. + Set z limit for camera :param znear: lower limit for z :param zfar: upper limit for z @@ -780,7 +881,7 @@ def set_fov(self, fov): def set_light_color(self, color): """ - Set light color. + Set light color :param color: light color """ @@ -788,7 +889,7 @@ def set_light_color(self, color): def get_intrinsics(self): """ - Get camera intrinsics. 
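set_camera(camera, target, up) above takes a position, a look-at target, and an up vector, which together define the view matrix. For reference, a sketch of the standard right-handed look-at construction under those conventions; the helper name is illustrative and the renderer's own set_camera may differ in details:

import numpy as np

def look_at(eye, target, up):
    # Standard gluLookAt-style view matrix; a reference sketch, not a copy of
    # the renderer's internal math.
    eye, target, up = (np.asarray(v, dtype=np.float64) for v in (eye, target, up))
    f = target - eye
    f = f / np.linalg.norm(f)                 # forward
    s = np.cross(f, up)
    s = s / np.linalg.norm(s)                 # right
    u = np.cross(s, f)                        # true up
    V = np.eye(4)
    V[0, :3], V[1, :3], V[2, :3] = s, u, -f
    V[:3, 3] = [-s.dot(eye), -u.dot(eye), f.dot(eye)]
    return V

V = look_at(eye=[1.0, 1.0, 1.5], target=[0.0, 0.0, 0.5], up=[0.0, 0.0, 1.0])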
+ Get camera intrinsics :return: camera instrincs """ @@ -812,7 +913,7 @@ def get_intrinsics(self): def set_projection_matrix(self, fu, fv, u0, v0, znear, zfar): """ - Set projection matrix, given camera intrincs parameters. + Set projection matrix, given camera intrincs parameters """ w = self.width h = self.height @@ -872,7 +973,7 @@ def render( """ A function to render all the instances in the renderer and read the output from framebuffer. - :param modes: a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow') + :param modes: a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d', 'scene_flow', 'optical_flow'). :param hidden: hidden instances to skip. When rendering from a robot's perspective, it's own body can be hidden :param return_buffer: whether to return the frame buffers as numpy arrays :param render_shadow_pass: whether to render shadow @@ -1017,19 +1118,19 @@ def render_companion_window(self): def get_visual_objects(self): """ - Return visual objects. + Return visual objects """ return self.visual_objects def get_instances(self): """ - Return instances. + Return instances """ return self.instances def dump(self): """ - Dump instance vertex and face information. + Dump instance vertex and face information """ instances_vertices = [] instances_faces = [] @@ -1047,7 +1148,7 @@ def dump(self): def set_light_pos(self, light): """ - Set light position. + Set light position :param light: light position """ @@ -1055,13 +1156,13 @@ def set_light_pos(self, light): def get_num_objects(self): """ - Return the number of objects. + Return the number of objects """ return len(self.objects) def set_pose(self, pose, idx): """ - Set pose for a specific instance. + Set pose for a specific instance :param pose: instance pose :param idx: instance id @@ -1081,7 +1182,7 @@ def release(self): def clean(self): """ - Clean all the framebuffers, objects and instances. + Clean all the framebuffers, objects and instances """ clean_list = [ self.color_tex_rgb, @@ -1139,7 +1240,6 @@ def clean(self): self.faces = [] # GC should free things here self.visual_objects = [] self.instances = [] - self.update_instance_id_to_pb_id_map() self.vertex_data = [] self.shapes = [] save_path = os.path.join(igibson.ig_dataset_path, "tmp") @@ -1169,7 +1269,7 @@ def transform_point(self, vec): def transform_pose(self, pose): """ - Transform pose from world frame to camera frame. + Transform pose from world frame to camera frame :param pose: pose in world frame :return: pose in camera frame @@ -1182,15 +1282,15 @@ def transform_pose(self, pose): def render_active_cameras(self, modes=("rgb")): """ Render camera images for the active cameras. This is applicable for robosuite integration with iGibson, - where there are multiple cameras defined but only some are active (e.g., to switch between views with TAB). + where there are multiple cameras defined but only some are active (e.g., to switch between views with TAB) :return: a list of frames (number of modalities x number of robots) """ frames = [] hide_robot = self.rendering_settings.hide_robot for instance in self.instances: - if isinstance(instance.ig_object, BaseRobot): - for camera in instance.ig_object.cameras: + if isinstance(instance, Robot): + for camera in instance.robot.cameras: if camera.is_active(): camera_pose = camera.get_pose() camera_pos = camera_pose[:3] @@ -1206,19 +1306,18 @@ def render_active_cameras(self, modes=("rgb")): def render_robot_cameras(self, modes=("rgb")): """ - Render robot camera images. 
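get_intrinsics above returns the pinhole parameters that set_projection_matrix(fu, fv, u0, v0, znear, zfar) consumes. As a reference for how such parameters usually relate to the renderer's vertical FOV and image size, a hedged sketch of the standard pinhole relation (principal point at the image center and square pixels are assumptions of this sketch, not a copy of get_intrinsics):

import numpy as np

def intrinsics_from_fov(width, height, vertical_fov_deg):
    # Focal length in pixels from the vertical field of view.
    fv = height / (2.0 * np.tan(np.radians(vertical_fov_deg) / 2.0))
    fu = fv                                   # square pixels assumed
    u0, v0 = width / 2.0, height / 2.0
    return np.array([[fu, 0.0, u0], [0.0, fv, v0], [0.0, 0.0, 1.0]])

K = intrinsics_from_fov(512, 512, 90.0)       # fu = fv = 256 for this camera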
+ Render robot camera images :return: a list of frames (number of modalities x number of robots) """ frames = [] for instance in self.instances: - if isinstance(instance.ig_object, BaseRobot): - camera_pos = instance.ig_object.eyes.get_position() - orn = instance.ig_object.eyes.get_orientation() + if isinstance(instance, Robot): + camera_pos = instance.robot.eyes.get_position() + orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[:3, :3] view_direction = mat.dot(np.array([1, 0, 0])) - up_direction = mat.dot(np.array([0, 0, 1])) - self.set_camera(camera_pos, camera_pos + view_direction, up_direction, cache=True) + self.set_camera(camera_pos, camera_pos + view_direction, [0, 0, 1], cache=True) hidden_instances = [] if self.rendering_settings.hide_robot: hidden_instances.append(instance) @@ -1243,8 +1342,8 @@ def _get_names_active_cameras(self): """ names = [] for instance in self.instances: - if isinstance(instance.ig_object, BaseRobot): - for camera in instance.ig_object.cameras: + if isinstance(instance, Robot): + for camera in instance.robot.cameras: if camera.is_active(): names.append(camera.camera_name) return names @@ -1255,8 +1354,8 @@ def _switch_camera(self, idx): Applicable for integration with iGibson. """ for instance in self.instances: - if isinstance(instance.ig_object, BaseRobot): - instance.ig_object.cameras[idx].switch() + if isinstance(instance, Robot): + instance.robot.cameras[idx].switch() def _is_camera_active(self, idx): """ @@ -1264,8 +1363,8 @@ def _is_camera_active(self, idx): Applicable for integration with iGibson. """ for instance in self.instances: - if isinstance(instance.ig_object, BaseRobot): - return instance.ig_object.cameras[idx].is_active() + if isinstance(instance, Robot): + return instance.robot.cameras[idx].is_active() def _get_camera_name(self, idx): """ @@ -1273,15 +1372,15 @@ def _get_camera_name(self, idx): Applicable for integration with iGibson. """ for instance in self.instances: - if isinstance(instance.ig_object, BaseRobot): - return instance.ig_object.cameras[idx].camera_name + if isinstance(instance, Robot): + return instance.robot.cameras[idx].camera_name def optimize_vertex_and_texture(self): """ - Optimize vertex and texture for optimized renderer. 
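render_robot_cameras above builds the camera pose from the robot's eye link: the orientation quaternion is converted to a rotation matrix, the view direction is the rotated local +x axis, and the camera is aimed at camera_pos + view_direction. A self-contained sketch of that conversion; quat_xyzw_to_rotmat stands in for the renderer's quat2rotmat/xyzw2wxyz helpers, and the pose values are made up:

import numpy as np

def quat_xyzw_to_rotmat(q):
    # Standard rotation matrix for a unit quaternion given as (x, y, z, w).
    x, y, z, w = np.asarray(q, dtype=np.float64) / np.linalg.norm(q)
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - z * w),     2 * (x * z + y * w)],
        [2 * (x * y + z * w),     1 - 2 * (x * x + z * z), 2 * (y * z - x * w)],
        [2 * (x * z - y * w),     2 * (y * z + x * w),     1 - 2 * (x * x + y * y)],
    ])

camera_pos = np.array([0.0, 0.0, 1.2])        # hypothetical eye position
orn = np.array([0.0, 0.0, 0.0, 1.0])          # hypothetical eye orientation (xyzw)
mat = quat_xyzw_to_rotmat(orn)
view_direction = mat.dot([1, 0, 0])           # camera looks along the eye's local +x
up_direction = mat.dot([0, 0, 1])             # eye's local +z used as the up vector
# renderer.set_camera(camera_pos, camera_pos + view_direction, up_direction, cache=True)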
+ Optimize vertex and texture for optimized renderer """ - for tex_file in self.texture_files: - print("Texture: ", tex_file) + # for tex_file in self.texture_files: + # print("Texture: ", tex_file) # Set cutoff about 4096, otherwise we end up filling VRAM very quickly cutoff = 5000 * 5000 shouldShrinkSmallTextures = True @@ -1319,22 +1418,34 @@ def optimize_vertex_and_texture(self): hidden_array = [] for instance in self.instances: - id_sum = 0 - # Collect OR buffer indices over all visual objects in this group - temp_or_buffer_indices = [] - for vo in instance.objects: - ids = vo.VAO_ids + if isinstance(instance, Instance): + ids = instance.object.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) - # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this InstanceGroup will use - temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end))) - id_sum += len(ids) - instance.or_buffer_indices = list(temp_or_buffer_indices) - class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * id_sum) - instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * id_sum) - pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum) - hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum) + # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this Instance will use + instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end)) + class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * len(ids)) + instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * len(ids)) + pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids)) + hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids)) + elif isinstance(instance, InstanceGroup) or isinstance(instance, Robot): + id_sum = 0 + # Collect OR buffer indices over all visual objects in this group + temp_or_buffer_indices = [] + for vo in instance.objects: + ids = vo.VAO_ids + or_buffer_idx_start = len(duplicate_vao_ids) + duplicate_vao_ids.extend(ids) + or_buffer_idx_end = len(duplicate_vao_ids) + # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this InstanceGroup will use + temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end))) + id_sum += len(ids) + instance.or_buffer_indices = list(temp_or_buffer_indices) + class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * id_sum) + instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * id_sum) + pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum) + hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum) # Number of shapes in the OR buffer is equal to the number of duplicate vao_ids self.or_buffer_shape_num = len(duplicate_vao_ids) @@ -1495,7 +1606,7 @@ def optimize_vertex_and_texture(self): def update_optimized_texture_internal(self): """ - Update the texture_id for optimized renderer. 
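Both optimize_vertex_and_texture above and update_optimized_texture_internal below flatten per-instance state into one entry per VAO so a single merged draw can index it: the buffer slots each instance occupies are remembered in or_buffer_indices, and class/instance ids are normalized into [0, 1] floats. A toy sketch of that bookkeeping with hypothetical instance records and illustrative constants:

MAX_CLASS_COUNT = 512        # illustrative values, not necessarily the renderer's
MAX_INSTANCE_COUNT = 1024

instances = [                # hypothetical instances, each owning some VAO ids
    {"id": 0, "class_id": 3, "use_pbr": True,  "hidden": False, "vao_ids": [0, 1]},
    {"id": 1, "class_id": 7, "use_pbr": False, "hidden": False, "vao_ids": [2]},
]

duplicate_vao_ids, class_id_array, instance_id_array = [], [], []
pbr_data_array, hidden_array = [], []
for inst in instances:
    start = len(duplicate_vao_ids)
    duplicate_vao_ids.extend(inst["vao_ids"])
    end = len(duplicate_vao_ids)
    inst["or_buffer_indices"] = list(range(start, end))   # slots this instance owns
    n = len(inst["vao_ids"])
    class_id_array.extend([inst["class_id"] / MAX_CLASS_COUNT] * n)
    instance_id_array.extend([inst["id"] / MAX_INSTANCE_COUNT] * n)
    pbr_data_array.extend([[float(inst["use_pbr"]), 1.0, 1.0, 1.0]] * n)
    hidden_array.extend([[float(inst["hidden"]), 1.0, 1.0, 1.0]] * n)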
+ Update the texture_id for optimized renderer """ # Some of these may share visual data, but have unique transforms duplicate_vao_ids = [] @@ -1509,24 +1620,39 @@ def update_optimized_texture_internal(self): hidden_array = [] for instance in self.instances: - id_sum = 0 - # Collect OR buffer indices over all visual objects in this group - temp_or_buffer_indices = [] - for vo in instance.objects: - ids = vo.VAO_ids + if isinstance(instance, Instance): + ids = instance.object.VAO_ids or_buffer_idx_start = len(duplicate_vao_ids) duplicate_vao_ids.extend(ids) or_buffer_idx_end = len(duplicate_vao_ids) - # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this InstanceGroup will use - temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end))) - id_sum += len(ids) - instance.or_buffer_indices = list(temp_or_buffer_indices) - class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * id_sum) - instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * id_sum) - pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum) - hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum) + # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this Instance will use + instance.or_buffer_indices = list(np.arange(or_buffer_idx_start, or_buffer_idx_end)) + class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * len(ids)) + instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * len(ids)) + pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * len(ids)) + hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * len(ids)) + elif isinstance(instance, InstanceGroup) or isinstance(instance, Robot): + id_sum = 0 + # Collect OR buffer indices over all visual objects in this group + temp_or_buffer_indices = [] + for vo in instance.objects: + ids = vo.VAO_ids + or_buffer_idx_start = len(duplicate_vao_ids) + duplicate_vao_ids.extend(ids) + or_buffer_idx_end = len(duplicate_vao_ids) + # Store indices in the duplicate vao ids array, and hence the optimized rendering buffers, that this InstanceGroup will use + temp_or_buffer_indices.extend(list(np.arange(or_buffer_idx_start, or_buffer_idx_end))) + id_sum += len(ids) + instance.or_buffer_indices = list(temp_or_buffer_indices) + class_id_array.extend([float(instance.class_id) / MAX_CLASS_COUNT] * id_sum) + instance_id_array.extend([float(instance.id) / MAX_INSTANCE_COUNT] * id_sum) + pbr_data_array.extend([[float(instance.use_pbr), 1.0, 1.0, 1.0]] * id_sum) + hidden_array.extend([[float(instance.hidden), 1.0, 1.0, 1.0]] * id_sum) # Variables needed for multi draw elements call + index_ptr_offsets = [] + index_counts = [] + indices = [] diffuse_color_array = [] tex_num_array = [] tex_layer_array = [] @@ -1637,7 +1763,7 @@ def update_optimized_texture_internal(self): def update_hidden_highlight_state(self, instances): """ - Update the hidden state of a list of instances. + Updates the hidden state of a list of instances This function is called by instances and not every frame, since hiding is a very infrequent operation. 
""" @@ -1665,12 +1791,21 @@ def update_dynamic_positions(self, need_flow_info=False): :param need_flow_info: whether flow information is required """ for instance in self.instances: - buf_idxs = instance.or_buffer_indices - # Continue if instance has no visual objects - if not buf_idxs: - continue - self.trans_data[buf_idxs] = np.array(instance.poses_trans) - self.rot_data[buf_idxs] = np.array(instance.poses_rot) + # if instance.dynamic: + if isinstance(instance, Instance): + buf_idxs = instance.or_buffer_indices + # Continue if instance has no visual objects + if not buf_idxs: + continue + self.trans_data[buf_idxs] = np.array(instance.pose_trans) + self.rot_data[buf_idxs] = np.array(instance.pose_rot) + elif isinstance(instance, InstanceGroup) or isinstance(instance, Robot): + buf_idxs = instance.or_buffer_indices + # Continue if instance has no visual objects + if not buf_idxs: + continue + self.trans_data[buf_idxs] = np.array(instance.poses_trans) + self.rot_data[buf_idxs] = np.array(instance.poses_rot) if need_flow_info: # this part could be expensive @@ -1692,7 +1827,7 @@ def update_dynamic_positions(self, need_flow_info=False): def use_pbr(self, use_pbr, use_pbr_mapping): """ - Apply PBR setting to every instance. + Apply PBR setting to every instance :param use_pbr: whether to use pbr :param use_pbr_mapping: whether to use pbr mapping @@ -1703,7 +1838,7 @@ def use_pbr(self, use_pbr, use_pbr_mapping): def setup_lidar_param(self): """ - Set up LiDAR params. + Set up LiDAR params """ lidar_vertical_low = -15 / 180.0 * np.pi lidar_vertical_high = 15 / 180.0 * np.pi @@ -1735,8 +1870,7 @@ def setup_lidar_param(self): def get_lidar_from_depth(self): """ - Get partial LiDAR readings from depth sensors with limited FOV. - + Get partial LiDAR readings from depth sensors with limited FOV :return: partial LiDAR readings with limited FOV """ lidar_readings = self.render(modes=("3d"))[0] @@ -1748,20 +1882,18 @@ def get_lidar_from_depth(self): def get_lidar_all(self, offset_with_camera=np.array([0, 0, 0])): """ - Get complete LiDAR readings by patching together partial ones. 
- + Get complete LiDAR readings by patching together partial ones :param offset_with_camera: optionally place the lidar scanner - with an offset to the camera + with an offset to the camera :return: complete 360 degree LiDAR readings """ for instance in self.instances: - if isinstance(instance.ig_object, BaseRobot): - camera_pos = instance.ig_object.eyes.get_position() + offset_with_camera - orn = instance.ig_object.eyes.get_orientation() + if isinstance(instance, Robot): + camera_pos = instance.robot.eyes.get_position() + offset_with_camera + orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[:3, :3] view_direction = mat.dot(np.array([1, 0, 0])) - up_direction = mat.dot(np.array([0, 0, 1])) - self.set_camera(camera_pos, camera_pos + view_direction, up_direction) + self.set_camera(camera_pos, camera_pos + view_direction, [0, 0, 1]) original_fov = self.vertical_fov self.set_fov(90) @@ -1799,35 +1931,25 @@ def get_cube(self, mode="rgb", use_robot_camera=False): """ :param mode: simulator rendering mode, 'rgb' or '3d' :param use_robot_camera: use the camera pose from robot + :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D] * n_cameras """ - # Cache the original fov and V to be restored later - original_fov = self.vertical_fov - original_V = np.copy(self.V) - - # Set fov to be 90 degrees + orig_fov = self.vertical_fov self.set_fov(90) + org_V = np.copy(self.V) - # Compute initial_V that will be used to render in 6 directions, based on whether use_robot_camera is True if use_robot_camera: for instance in self.instances: - if isinstance(instance.ig_object, BaseRobot): - camera_pos = instance.ig_object.eyes.get_position() - orn = instance.ig_object.eyes.get_orientation() + if isinstance(instance, Robot): + camera_pos = instance.robot.eyes.get_position() + orn = instance.robot.eyes.get_orientation() mat = quat2rotmat(xyzw2wxyz(orn))[:3, :3] view_direction = mat.dot(np.array([1, 0, 0])) - up_direction = mat.dot(np.array([0, 0, 1])) - self.set_camera(camera_pos, camera_pos + view_direction, up_direction) - initial_V = np.copy(self.V) - else: - initial_V = original_V + self.set_camera(camera_pos, camera_pos + view_direction, [0, 0, 1]) def render_cube(): - # Store 6 frames in 6 directions frames = [] - - # Forward, backward, left, right r = np.array( [ [ @@ -1849,45 +1971,30 @@ def render_cube(): # Up r_up = np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]) - self.V = r_up.dot(initial_V) + self.V = r_up.dot(org_V) frames.append(self.render(modes=(mode))[0]) - # Down r_down = np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) - self.V = r_down.dot(initial_V) + # Down + self.V = r_down.dot(org_V) frames.append(self.render(modes=(mode))[0]) return frames frames = render_cube() - - # Restore original fov and V - self.V = original_V - self.set_fov(original_fov) - + self.V = org_V + self.set_fov(orig_fov) return frames def get_equi(self, mode="rgb", use_robot_camera=False): """ - Generate panorama images :param mode: simulator rendering mode, 'rgb' or '3d' :param use_robot_camera: use the camera pose from robot :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D] """ frames = self.get_cube(mode=mode, use_robot_camera=use_robot_camera) frames = [frames[0], frames[1][:, ::-1, :], frames[2][:, ::-1, :], frames[3], frames[4], frames[5]] - try: - equi = py360convert.c2e(cubemap=frames, h=frames[0].shape[0], w=frames[0].shape[0] * 2, cube_format="list") - except 
AssertionError: - raise ValueError("Something went wrong during getting cubemap. Is the image size not a square?") + equi = py360convert.c2e(cubemap=frames, h=frames[0].shape[0], w=frames[0].shape[0] * 2, cube_format="list") return equi - - def update_instance_id_to_pb_id_map(self): - self.instance_id_to_pb_id = np.full((MAX_INSTANCE_COUNT,), -1) - for inst in self.instances: - self.instance_id_to_pb_id[inst.id] = inst.pybullet_uuid if inst.pybullet_uuid is not None else -1 - - def get_pb_ids_for_instance_ids(self, instance_ids): - return self.instance_id_to_pb_id[instance_ids.astype(int)] diff --git a/igibson/render/mesh_renderer/mesh_renderer_settings.py b/igibson/render/mesh_renderer/mesh_renderer_settings.py index 3c4bf021c..99c20bf25 100644 --- a/igibson/render/mesh_renderer/mesh_renderer_settings.py +++ b/igibson/render/mesh_renderer/mesh_renderer_settings.py @@ -1,4 +1,3 @@ -import logging import os import platform @@ -26,8 +25,6 @@ def __init__( show_glfw_window=False, blend_highlight=False, is_robosuite=False, - glsl_version_override=460, - load_textures=True, ): """ :param use_fisheye: whether to use fisheye camera @@ -48,26 +45,14 @@ def __init__( :param show_glfw_window: whether to show glfw window (default false) :param blend_highlight: blend highlight of objects into RGB image :param is_robosuite: whether the environment is of robosuite. - :param glsl_version_override: for backwards compatibility only. Options are 450 or 460. - :param load_textures: Whether textures should be loaded. Set to False if not using RGB modality to save memory. """ self.use_fisheye = use_fisheye self.msaa = msaa + self.enable_shadow = enable_shadow self.env_texture_filename = env_texture_filename self.env_texture_filename2 = env_texture_filename2 self.env_texture_filename3 = env_texture_filename3 - - if platform.system() == "Darwin": - if optimized: - logging.warning("Darwin does not support optimized renderer, automatically disabling") - if enable_shadow: - logging.warning("Darwin does not support shadow, automatically disabling") - self.optimized = False - self.enable_shadow = False - else: - self.optimized = optimized - self.enable_shadow = enable_shadow - + self.optimized = optimized self.skybox_size = skybox_size self.light_modulation_map_filename = light_modulation_map_filename self.light_dimming_factor = light_dimming_factor @@ -78,8 +63,6 @@ def __init__( self.show_glfw_window = show_glfw_window self.blend_highlight = blend_highlight self.is_robosuite = is_robosuite - self.load_textures = load_textures - self.glsl_version_override = glsl_version_override if glfw_gl_version is not None: self.glfw_gl_version = glfw_gl_version @@ -87,7 +70,7 @@ def __init__( if platform.system() == "Darwin": self.glfw_gl_version = [4, 1] else: - self.glfw_gl_version = [4, 6] + self.glfw_gl_version = [4, 5] def get_fastest(self): self.msaa = False diff --git a/igibson/render/mesh_renderer/mesh_renderer_tensor.py b/igibson/render/mesh_renderer/mesh_renderer_tensor.py index cf1621c4b..ea36c880d 100644 --- a/igibson/render/mesh_renderer/mesh_renderer_tensor.py +++ b/igibson/render/mesh_renderer/mesh_renderer_tensor.py @@ -1,5 +1,4 @@ import logging -import platform from igibson.render.mesh_renderer.get_available_devices import get_cuda_device from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer, MeshRendererSettings @@ -23,8 +22,6 @@ def __init__( rendering_settings=MeshRendererSettings(), simulator=None, ): - if platform.system() != "Linux": - raise Exception("Rendering to pytorch tensor is only 
available on Linux.") super(MeshRendererG2G, self).__init__( width, height, vertical_fov, device_idx, rendering_settings, simulator ) @@ -33,8 +30,7 @@ def __init__( with torch.cuda.device(self.cuda_idx): self.image_tensor = torch.cuda.ByteTensor(height, width, 4).cuda() self.normal_tensor = torch.cuda.ByteTensor(height, width, 4).cuda() - self.seg_tensor = torch.cuda.FloatTensor(height, width, 4).cuda() - self.ins_seg_tensor = torch.cuda.FloatTensor(height, width, 4).cuda() + self.seg_tensor = torch.cuda.ByteTensor(height, width, 4).cuda() self.pc_tensor = torch.cuda.FloatTensor(height, width, 4).cuda() self.optical_flow_tensor = torch.cuda.FloatTensor(height, width, 4).cuda() self.scene_flow_tensor = torch.cuda.FloatTensor(height, width, 4).cuda() @@ -65,14 +61,6 @@ def readbuffer_to_tensor(self, modes=AVAILABLE_MODALITIES): int(self.color_tex_semantics), int(self.width), int(self.height), self.seg_tensor.data_ptr() ) results.append(self.seg_tensor.clone()) - elif mode == "ins_seg": - self.r.map_tensor( - int(self.color_tex_ins_seg), - int(self.width), - int(self.height), - self.ins_seg_tensor.data_ptr(), - ) - results.append(self.ins_seg_tensor.clone()) elif mode == "3d": self.r.map_tensor_float( int(self.color_tex_3d), int(self.width), int(self.height), self.pc_tensor.data_ptr() @@ -97,7 +85,7 @@ def readbuffer_to_tensor(self, modes=AVAILABLE_MODALITIES): return results - def render(self, modes=AVAILABLE_MODALITIES, hidden=(), render_shadow_pass=True): + def render(self, modes=AVAILABLE_MODALITIES, hidden=(), return_buffer=True, render_shadow_pass=True): """ A function to render all the instances in the renderer and read the output from framebuffer into pytorch tensor. @@ -110,6 +98,7 @@ def render(self, modes=AVAILABLE_MODALITIES, hidden=(), render_shadow_pass=True) ) return self.readbuffer_to_tensor(modes) + except ImportError: - logging.warning("torch is not available, falling back to rendering to memory (instead of tensor)") + print("torch is not available, falling back to rendering to memory(instead of tensor)") MeshRendererG2G = MeshRenderer diff --git a/igibson/render/mesh_renderer/mesh_renderer_vr.py b/igibson/render/mesh_renderer/mesh_renderer_vr.py index d77e1e58f..ee7e9086e 100644 --- a/igibson/render/mesh_renderer/mesh_renderer_vr.py +++ b/igibson/render/mesh_renderer/mesh_renderer_vr.py @@ -2,7 +2,6 @@ import time from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer, MeshRendererSettings -from igibson.utils.constants import AVAILABLE_MODALITIES from igibson.utils.utils import dump_config, parse_config, parse_str_config @@ -262,10 +261,7 @@ def __init__(self, rendering_settings=MeshRendererSettings(), vr_settings=VrSett self.width = 1296 self.height = 1440 super().__init__( - width=self.width, - height=self.height, - rendering_settings=self.vr_rendering_settings, - simulator=simulator, + width=self.width, height=self.height, rendering_settings=self.vr_rendering_settings, simulator=simulator ) # Rename self.r to self.vrsys @@ -278,6 +274,7 @@ def __init__(self, rendering_settings=MeshRendererSettings(), vr_settings=VrSett # Always turn MSAA off for VR self.msaa = False + # The VrTextOverlay that serves as the VR HUD (heads-up-display) self.vr_hud = None def gen_vr_hud(self): @@ -304,9 +301,7 @@ def update_vr_data(self): """ self.vrsys.updateVRData() - def render( - self, modes=AVAILABLE_MODALITIES, hidden=(), return_buffer=True, render_shadow_pass=True, render_text_pass=True - ): + def render(self, return_frame=False): """ Renders VR scenes. 
""" @@ -323,7 +318,7 @@ def render( [right_cam_pos[0], right_cam_pos[1], 10], [right_cam_pos[0], right_cam_pos[1], 0] ) - super().render(modes=("rgb"), return_buffer=False) + super().render(modes=("rgb"), return_buffer=False, render_shadow_pass=True) self.vrsys.postRenderVRForEye("left", self.color_tex_rgb) # Render and submit right eye self.V = right_view @@ -336,10 +331,13 @@ def render( self.vrsys.postRenderVRForEye("right", self.color_tex_rgb) # Update HUD so it renders in the HMD - if self.vr_hud is not None: + if self.vr_hud: self.vr_hud.refresh_text() else: - return super().render(modes=("rgb"), return_buffer=return_buffer) + if return_frame: + return super().render(modes=("rgb"), return_buffer=return_frame, render_shadow_pass=True) + else: + super().render(modes=("rgb"), return_buffer=False, render_shadow_pass=True) def vr_compositor_update(self): """ diff --git a/igibson/render/mesh_renderer/shaders/450/optimized_vert.shader b/igibson/render/mesh_renderer/shaders/450/optimized_vert.shader index 043f57d17..4c3636b6e 100644 --- a/igibson/render/mesh_renderer/shaders/450/optimized_vert.shader +++ b/igibson/render/mesh_renderer/shaders/450/optimized_vert.shader @@ -1,4 +1,4 @@ -#version 450 +#version 460 #define MAX_ARRAY_SIZE 1024 @@ -26,8 +26,10 @@ layout (std140) uniform UVData { vec4 uv_transform_param[MAX_ARRAY_SIZE]; }; - -in int gl_DrawID; +// Commenting out this line (and changing the version to 460) +// allows the OpenGL to compile this regardless of the +// Nvidia driver version +// in int gl_DrawID; uniform mat4 V; uniform mat4 last_V; diff --git a/igibson/render/mesh_renderer/shaders/460/equirect2cube_cs.glsl b/igibson/render/mesh_renderer/shaders/460/equirect2cube_cs.glsl deleted file mode 100644 index 3de1d2398..000000000 --- a/igibson/render/mesh_renderer/shaders/460/equirect2cube_cs.glsl +++ /dev/null @@ -1,55 +0,0 @@ -#version 460 core -// Physically Based Rendering -// Copyright (c) 2017-2018 Michał Siejak - -// Converts equirectangular (lat-long) projection texture into a proper cubemap. - -const float PI = 3.141592; -const float TwoPI = 2 * PI; -uniform float light_dimming_factor; - -#if VULKAN -layout(set=0, binding=0) uniform sampler2D inputTexture; -layout(set=0, binding=1, rgba16f) restrict writeonly uniform imageCube outputTexture; -#else -layout(binding=0) uniform sampler2D inputTexture; -layout(binding=0, rgba16f) restrict writeonly uniform imageCube outputTexture; -#endif // VULKAN - -// Calculate normalized sampling direction vector based on current fragment coordinates (gl_GlobalInvocationID.xyz). -// This is essentially "inverse-sampling": we reconstruct what the sampling vector would be if we wanted it to "hit" -// this particular fragment in a cubemap. -// See: OpenGL core profile specs, section 8.13. -vec3 getSamplingVector() -{ - vec2 st = gl_GlobalInvocationID.xy/vec2(imageSize(outputTexture)); - vec2 uv = 2.0 * vec2(st.x, 1.0-st.y) - vec2(1.0); - - vec3 ret; - // Select vector based on cubemap face index. - // Sadly 'switch' doesn't seem to work, at least on NVIDIA. 
- if(gl_GlobalInvocationID.z == 0) ret = vec3(1.0, uv.y, -uv.x); - else if(gl_GlobalInvocationID.z == 1) ret = vec3(-1.0, uv.y, uv.x); - else if(gl_GlobalInvocationID.z == 2) ret = vec3(uv.x, 1.0, -uv.y); - else if(gl_GlobalInvocationID.z == 3) ret = vec3(uv.x, -1.0, uv.y); - else if(gl_GlobalInvocationID.z == 4) ret = vec3(uv.x, uv.y, 1.0); - else if(gl_GlobalInvocationID.z == 5) ret = vec3(-uv.x, uv.y, -1.0); - return normalize(ret); -} - -layout(local_size_x=32, local_size_y=32, local_size_z=1) in; -void main(void) -{ - vec3 v = getSamplingVector(); - - // Convert Cartesian direction vector to spherical coordinates. - float phi = atan(v.y, v.x); // here we use z-up convention, so y and z are swapped from original code - float theta = acos(v.z); // here we use z-up convention, so y and z are swapped from original code - - // Sample equirectangular texture. - vec4 color = texture(inputTexture, vec2(phi/TwoPI, theta/PI)); - color.xyz = color.xyz * light_dimming_factor; - - // Write out color to output cubemap. - imageStore(outputTexture, ivec3(gl_GlobalInvocationID), color); -} diff --git a/igibson/render/mesh_renderer/shaders/460/frag.shader b/igibson/render/mesh_renderer/shaders/460/frag.shader deleted file mode 100644 index 7cf906c49..000000000 --- a/igibson/render/mesh_renderer/shaders/460/frag.shader +++ /dev/null @@ -1,232 +0,0 @@ -#version 460 -uniform sampler2D texUnit; -uniform sampler2D metallicTexture; -uniform sampler2D roughnessTexture; -uniform sampler2D normalTexture; - -uniform samplerCube specularTexture; -uniform samplerCube irradianceTexture; -uniform sampler2D specularBRDF_LUT; - -uniform samplerCube specularTexture2; -uniform samplerCube irradianceTexture2; -uniform sampler2D specularBRDF_LUT2; - -uniform sampler2D lightModulationMap; - -uniform vec3 eyePosition; - -uniform float use_texture; -uniform float use_pbr; -uniform float use_pbr_mapping; -uniform float use_two_light_probe; -uniform float metallic; -uniform float roughness; - -uniform sampler2D depthMap; -uniform int shadow_pass; - -in vec2 theCoords; -in vec3 Normal_world; -in vec3 Normal_cam; -in vec3 FragPos; -in vec3 Semantic_seg_color; -in vec3 Instance_seg_color; -in vec3 Pos_cam; -in vec3 Pos_cam_prev; -in vec3 Pos_cam_projected; -in vec3 Diffuse_color; -in mat3 TBN; -in vec4 FragPosLightSpace; -in vec2 Optical_flow; - -const float PI = 3.141592; -const float Epsilon = 0.00001; - -layout (location = 0) out vec4 outputColour; -layout (location = 1) out vec4 NormalColour; -layout (location = 2) out vec4 SemanticSegColour; -layout (location = 3) out vec4 InstanceSegColour; -layout (location = 4) out vec4 PCColour; -layout (location = 5) out vec4 SceneFlowColour; -layout (location = 6) out vec4 OpticalFlowColour; - -uniform vec3 light_position; // in world coordinate -uniform vec3 light_color; // light color -float ndfGGX(float cosLh, float roughness) -{ - float alpha = roughness * roughness; - float alphaSq = alpha * alpha; - - float denom = (cosLh * cosLh) * (alphaSq - 1.0) + 1.0; - return alphaSq / (PI * denom * denom); -} - -// Single term for separable Schlick-GGX below. -float gaSchlickG1(float cosTheta, float k) -{ - return cosTheta / (cosTheta * (1.0 - k) + k); -} - -// Schlick-GGX approximation of geometric attenuation function using Smith's method. -float gaSchlickGGX(float cosLi, float cosLo, float roughness) -{ - float r = roughness + 1.0; - float k = (r * r) / 8.0; // Epic suggests using this roughness remapping for analytic lights. 
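The deleted equirect2cube_cs.glsl reconstructs, for every output texel, the direction that would sample it on the cube (the per-face table above), then converts that direction to spherical coordinates to look up the equirectangular source. The same mapping in Python, kept in one language with the other examples; the face indices and z-up phi/theta convention follow the shader:

import numpy as np

def cubemap_sampling_vector(face, u, v):
    # u, v in [-1, 1]; the table mirrors the if-chain in getSamplingVector().
    table = {
        0: (1.0, v, -u),
        1: (-1.0, v, u),
        2: (u, 1.0, -v),
        3: (u, -1.0, v),
        4: (u, v, 1.0),
        5: (-u, v, -1.0),
    }
    d = np.array(table[face], dtype=np.float64)
    return d / np.linalg.norm(d)

d = cubemap_sampling_vector(0, 0.25, -0.5)
phi, theta = np.arctan2(d[1], d[0]), np.arccos(d[2])   # z-up spherical angles, as in main()
# The equirectangular UV is then (phi / (2*pi), theta / pi), as in the shader's texture() call.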
- return gaSchlickG1(cosLi, k) * gaSchlickG1(cosLo, k); -} - -// Shlick's approximation of the Fresnel factor. -vec3 fresnelSchlick(vec3 F0, float cosTheta) -{ - return F0 + (vec3(1.0) - F0) * pow(1.0 - cosTheta, 5.0); -} - -void main() { - vec3 lightDir = vec3(0,0,1);//normalize(light_position); - //sunlight pointing to z direction - float diff = 0.5 + 0.5 * max(dot(Normal_world, lightDir), 0.0); - vec3 diffuse = diff * light_color; - vec2 texelSize = 1.0 / textureSize(depthMap, 0); - - float shadow; - if (shadow_pass == 2) { - vec3 projCoords = FragPosLightSpace.xyz / FragPosLightSpace.w; - projCoords = projCoords * 0.5 + 0.5; - - float cosTheta = dot(Normal_world, lightDir); - cosTheta = clamp(cosTheta, 0.0, 1.0); - float bias = 0.005*tan(acos(cosTheta)); - bias = clamp(bias, 0.001 ,0.1); - float currentDepth = projCoords.z; - float closestDepth = 0; - - shadow = 0.0; - float current_shadow = 0; - - for(int x = -2; x <= 2; ++x) - { - for (int y = -2; y <= 2; ++y) - { - closestDepth = texture(depthMap, projCoords.xy + vec2(x, y) * texelSize).b * 0.5 + 0.5; - current_shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0; - if ((projCoords.z > 1.0) || (projCoords.x > 1.0) || (projCoords.y > 1.0) - || (projCoords.x < 0) || (projCoords.y < 0)) current_shadow = 0.0; - shadow += current_shadow; - } - } - shadow /= 25.0; - } - else { - shadow = 0.0; - } - - //not using pbr - if (use_pbr == 0) { - if (use_texture == 1) { - outputColour = texture(texUnit, theCoords);// albedo only - } else { - outputColour = vec4(Diffuse_color,1) * diff; //diffuse color - } - } - - //use pbr, not using mapping - if ((use_pbr == 1) && (use_pbr_mapping == 0)) { - - vec3 N = normalize(Normal_world); - vec3 Lo = normalize(eyePosition - FragPos); - float cosLo = max(0.0, dot(N, Lo)); - vec3 Lr = 2.0 * cosLo * N - Lo; - - vec3 albedo; - if (use_texture == 1) { - albedo = texture(texUnit, theCoords).rgb;// albedo only - } else { - albedo = Diffuse_color; //diffuse color - } - - const vec3 Fdielectric = vec3(0.04); - vec3 F0 = mix(Fdielectric, albedo, metallic); - vec3 irradiance = texture(irradianceTexture, N).rgb; - vec3 F = fresnelSchlick(F0, cosLo); - vec3 kd = mix(vec3(1.0) - F, vec3(0.0), metallic); - vec3 diffuseIBL = kd * albedo * irradiance; - int specularTextureLevels = textureQueryLevels(specularTexture); - vec3 specularIrradiance = textureLod(specularTexture, Lr, roughness * specularTextureLevels).rgb; - vec2 specularBRDF = texture(specularBRDF_LUT, vec2(cosLo, roughness)).rg; - vec3 specularIBL = (F0 * specularBRDF.x + specularBRDF.y) * specularIrradiance; - vec3 ambientLighting = diffuseIBL + specularIBL; - //vec3 reflection = textureLod(specularTexture, vec3(Lr.x, Lr.z, Lr.y), 1).rgb; - outputColour = vec4(ambientLighting, 1); - - } - - // use pbr and mapping - if ((use_pbr == 1) && (use_pbr_mapping == 1)) { - vec3 normal_map = 2 * texture(normalTexture, theCoords).rgb - 1; - mat3 TBN2 = TBN; - if (!gl_FrontFacing) { - TBN2 = mat3(TBN[0], TBN[1], -TBN[2]); - } - vec3 N = normalize(TBN2 * normal_map); - - vec3 Lo = normalize(eyePosition - FragPos); - float cosLo = max(0.0, dot(N, Lo)); - vec3 Lr = 2.0 * cosLo * N - Lo; - - vec3 albedo; - if (use_texture == 1) { - albedo = texture(texUnit, theCoords).rgb;// albedo only - } else { - albedo = Diffuse_color; //diffuse color - } - - float metallic_sampled = texture(metallicTexture, theCoords).r; - float roughness_sampled = texture(roughnessTexture, theCoords).r; - const vec3 Fdielectric = vec3(0.04); - vec3 F0 = mix(Fdielectric, albedo, metallic_sampled); - 
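The deleted frag.shader builds its lighting from the three microfacet terms defined above: the GGX normal distribution, the Schlick-GGX geometric attenuation in Smith form, and Schlick's Fresnel approximation. The same formulas transcribed into NumPy for reference (function names are Python renderings of the GLSL ones):

import numpy as np

def ndf_ggx(cos_lh, roughness):
    # GGX / Trowbridge-Reitz normal distribution function.
    alpha_sq = (roughness * roughness) ** 2
    denom = cos_lh * cos_lh * (alpha_sq - 1.0) + 1.0
    return alpha_sq / (np.pi * denom * denom)

def ga_schlick_g1(cos_theta, k):
    return cos_theta / (cos_theta * (1.0 - k) + k)

def ga_schlick_ggx(cos_li, cos_lo, roughness):
    # Smith geometric attenuation with the analytic-light roughness remapping.
    r = roughness + 1.0
    k = (r * r) / 8.0
    return ga_schlick_g1(cos_li, k) * ga_schlick_g1(cos_lo, k)

def fresnel_schlick(f0, cos_theta):
    # Schlick's approximation of the Fresnel reflectance.
    f0 = np.asarray(f0, dtype=np.float64)
    return f0 + (1.0 - f0) * (1.0 - cos_theta) ** 5

print(fresnel_schlick([0.04, 0.04, 0.04], 0.7))   # dielectric F0 evaluated at cos(theta) = 0.7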
vec3 irradiance; - - float modulate_factor; - - if (use_two_light_probe == 1) { - vec4 room = texture(lightModulationMap, vec2((FragPos.x + 15.0)/30.0, - (FragPos.y + 15.0)/30.0)); - modulate_factor = room.r; - } else { - modulate_factor = 1.0; - } - irradiance = texture(irradianceTexture, N).rgb * modulate_factor + - (1-modulate_factor) * texture(irradianceTexture2, N).rgb; - vec3 F = fresnelSchlick(F0, cosLo); - vec3 kd = mix(vec3(1.0) - F, vec3(0.0), metallic_sampled); - vec3 diffuseIBL = kd * albedo * irradiance; - int specularTextureLevels = textureQueryLevels(specularTexture); - int specularTextureLevels2 = textureQueryLevels(specularTexture2); - vec3 specularIrradiance; - vec2 specularBRDF; - specularIrradiance = textureLod(specularTexture, Lr, roughness_sampled * specularTextureLevels).rgb * - modulate_factor + textureLod(specularTexture2, Lr, roughness_sampled * specularTextureLevels2).rgb * - (1-modulate_factor); - - specularBRDF = texture(specularBRDF_LUT, vec2(cosLo, roughness_sampled)).rg * modulate_factor + - texture(specularBRDF_LUT2, vec2(cosLo, roughness_sampled)).rg * (1-modulate_factor); - vec3 specularIBL = (F0 * specularBRDF.x + specularBRDF.y) * specularIrradiance; - vec3 ambientLighting = diffuseIBL + specularIBL; - //vec3 reflection = textureLod(specularTexture, vec3(Lr.x, Lr.z, Lr.y), 1).rgb; - outputColour = vec4(ambientLighting, 1); - } - - NormalColour = vec4((Normal_cam + 1) / 2,1); - SemanticSegColour = vec4(Semantic_seg_color, 1); - InstanceSegColour = vec4(Instance_seg_color, 1); - if (shadow_pass == 1) { - PCColour = vec4(Pos_cam_projected, 1); - } else { - PCColour = vec4(Pos_cam, 1); - } - outputColour = outputColour * (1 - shadow * 0.5); - SceneFlowColour = vec4(Pos_cam - Pos_cam_prev,1); - OpticalFlowColour = vec4(Optical_flow,0,1); - -} diff --git a/igibson/render/mesh_renderer/shaders/460/irmap_cs.glsl b/igibson/render/mesh_renderer/shaders/460/irmap_cs.glsl deleted file mode 100644 index 6e46e9ce2..000000000 --- a/igibson/render/mesh_renderer/shaders/460/irmap_cs.glsl +++ /dev/null @@ -1,106 +0,0 @@ -#version 460 core -// Physically Based Rendering -// Copyright (c) 2017-2018 Michał Siejak - -// Computes diffuse irradiance cubemap convolution for image-based lighting. -// Uses quasi Monte Carlo sampling with Hammersley sequence. - -const float PI = 3.141592; -const float TwoPI = 2 * PI; -const float Epsilon = 0.00001; - -const uint NumSamples = 64 * 1024; -const float InvNumSamples = 1.0 / float(NumSamples); - -layout(binding=0) uniform samplerCube inputTexture; -layout(binding=0, rgba16f) restrict writeonly uniform imageCube outputTexture; - -// Compute Van der Corput radical inverse -// See: http://holger.dammertz.org/stuff/notes_HammersleyOnHemisphere.html -float radicalInverse_VdC(uint bits) -{ - bits = (bits << 16u) | (bits >> 16u); - bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u); - bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u); - bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u); - bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u); - return float(bits) * 2.3283064365386963e-10; // / 0x100000000 -} - -// Sample i-th point from Hammersley point set of NumSamples points total. -vec2 sampleHammersley(uint i) -{ - return vec2(i * InvNumSamples, radicalInverse_VdC(i)); -} - -// Uniformly sample point on a hemisphere. 
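The deleted irmap_cs.glsl draws its quasi-Monte-Carlo sample directions from a Hammersley point set built on the Van der Corput radical inverse shown above. The same bit-reversal trick in Python for reference; NUM_SAMPLES is the shader's constant:

NUM_SAMPLES = 64 * 1024

def radical_inverse_vdc(bits):
    # Bit-reversal of a 32-bit integer mapped into [0, 1).
    bits = (bits << 16) | (bits >> 16)
    bits = ((bits & 0x55555555) << 1) | ((bits & 0xAAAAAAAA) >> 1)
    bits = ((bits & 0x33333333) << 2) | ((bits & 0xCCCCCCCC) >> 2)
    bits = ((bits & 0x0F0F0F0F) << 4) | ((bits & 0xF0F0F0F0) >> 4)
    bits = ((bits & 0x00FF00FF) << 8) | ((bits & 0xFF00FF00) >> 8)
    return (bits & 0xFFFFFFFF) * 2.3283064365386963e-10   # divide by 2**32

def sample_hammersley(i, n=NUM_SAMPLES):
    return (i / n, radical_inverse_vdc(i))

print(sample_hammersley(1))   # -> (1/65536, 0.5)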
-// Cosine-weighted sampling would be a better fit for Lambertian BRDF but since this -// compute shader runs only once as a pre-processing step performance is not *that* important. -// See: "Physically Based Rendering" 2nd ed., section 13.6.1. -vec3 sampleHemisphere(float u1, float u2) -{ - const float u1p = sqrt(max(0.0, 1.0 - u1*u1)); - return vec3(cos(TwoPI*u2) * u1p, sin(TwoPI*u2) * u1p, u1); -} - -// Calculate normalized sampling direction vector based on current fragment coordinates (gl_GlobalInvocationID.xyz). -// This is essentially "inverse-sampling": we reconstruct what the sampling vector would be if we wanted it to "hit" -// this particular fragment in a cubemap. -// See: OpenGL core profile specs, section 8.13. -vec3 getSamplingVector() -{ - vec2 st = gl_GlobalInvocationID.xy/vec2(imageSize(outputTexture)); - vec2 uv = 2.0 * vec2(st.x, 1.0-st.y) - vec2(1.0); - - vec3 ret; - // Sadly 'switch' doesn't seem to work, at least on NVIDIA. - if(gl_GlobalInvocationID.z == 0) ret = vec3(1.0, uv.y, -uv.x); - else if(gl_GlobalInvocationID.z == 1) ret = vec3(-1.0, uv.y, uv.x); - else if(gl_GlobalInvocationID.z == 2) ret = vec3(uv.x, 1.0, -uv.y); - else if(gl_GlobalInvocationID.z == 3) ret = vec3(uv.x, -1.0, uv.y); - else if(gl_GlobalInvocationID.z == 4) ret = vec3(uv.x, uv.y, 1.0); - else if(gl_GlobalInvocationID.z == 5) ret = vec3(-uv.x, uv.y, -1.0); - return normalize(ret); -} - -// Compute orthonormal basis for converting from tanget/shading space to world space. -void computeBasisVectors(const vec3 N, out vec3 S, out vec3 T) -{ - // Branchless select non-degenerate T. - T = cross(N, vec3(0.0, 1.0, 0.0)); - T = mix(cross(N, vec3(1.0, 0.0, 0.0)), T, step(Epsilon, dot(T, T))); - - T = normalize(T); - S = normalize(cross(N, T)); -} - -// Convert point from tangent/shading space to world space. -vec3 tangentToWorld(const vec3 v, const vec3 N, const vec3 S, const vec3 T) -{ - return S * v.x + T * v.y + N * v.z; -} - -layout(local_size_x=32, local_size_y=32, local_size_z=1) in; -void main(void) -{ - vec3 N = getSamplingVector(); - - vec3 S, T; - computeBasisVectors(N, S, T); - - // Monte Carlo integration of hemispherical irradiance. - // As a small optimization this also includes Lambertian BRDF assuming perfectly white surface (albedo of 1.0) - // so we don't need to normalize in PBR fragment shader (so technically it encodes exitant radiance rather than irradiance). - vec3 irradiance = vec3(0); - for(uint i=0; i